Repository: vmware/ansible-for-nsxt Branch: master Commit: 57b2df630fab Files: 241 Total size: 1.2 MB Directory structure: gitextract_w6d1_2mv/ ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE.txt ├── README.md ├── galaxy.yml ├── meta/ │ └── runtime.yml ├── plugins/ │ ├── doc_fragments/ │ │ └── vmware_nsxt.py │ ├── module_utils/ │ │ ├── __init__.py │ │ ├── common_utils.py │ │ ├── nsxt_base_resource.py │ │ ├── nsxt_resource_urls.py │ │ ├── policy_communicator.py │ │ ├── policy_resource_specs/ │ │ │ ├── __init__.py │ │ │ ├── l2_bridge_ep_profile.py │ │ │ └── security_policy.py │ │ ├── upgrade_reverse_order.py │ │ ├── vcenter_utils.py │ │ └── vmware_nsxt.py │ └── modules/ │ ├── __init__.py │ ├── nsxt_certificates.py │ ├── nsxt_certificates_facts.py │ ├── nsxt_cluster_profiles.py │ ├── nsxt_cluster_profiles_facts.py │ ├── nsxt_deploy_ova.py │ ├── nsxt_edge_clusters.py │ ├── nsxt_edge_clusters_facts.py │ ├── nsxt_fabric_compute_managers.py │ ├── nsxt_fabric_compute_managers_facts.py │ ├── nsxt_global_manager_active.py │ ├── nsxt_global_manager_enable_service.py │ ├── nsxt_global_manager_registration.py │ ├── nsxt_ip_blocks.py │ ├── nsxt_ip_blocks_facts.py │ ├── nsxt_ip_pools.py │ ├── nsxt_ip_pools_facts.py │ ├── nsxt_licenses.py │ ├── nsxt_licenses_facts.py │ ├── nsxt_local_manager_registration.py │ ├── nsxt_local_managers_compatibility.py │ ├── nsxt_local_managers_facts.py │ ├── nsxt_logical_ports.py │ ├── nsxt_logical_ports_facts.py │ ├── nsxt_logical_router_ports.py │ ├── nsxt_logical_router_ports_facts.py │ ├── nsxt_logical_router_static_routes.py │ ├── nsxt_logical_routers.py │ ├── nsxt_logical_routers_facts.py │ ├── nsxt_logical_switches.py │ ├── nsxt_logical_switches_facts.py │ ├── nsxt_manager_auto_deployment.py │ ├── nsxt_manager_auto_deployment_facts.py │ ├── nsxt_manager_status.py │ ├── nsxt_policy_bfd_profile.py │ ├── nsxt_policy_gateway_policy.py │ ├── nsxt_policy_group.py │ ├── nsxt_policy_ip_block.py │ ├── nsxt_policy_ip_block_facts.py │ ├── 
nsxt_policy_ip_pool.py │ ├── nsxt_policy_ip_pool_facts.py │ ├── nsxt_policy_l2_bridge_ep_profile.py │ ├── nsxt_policy_security_policy.py │ ├── nsxt_policy_segment.py │ ├── nsxt_policy_tier0.py │ ├── nsxt_policy_tier1.py │ ├── nsxt_principal_identities.py │ ├── nsxt_principal_identities_facts.py │ ├── nsxt_repo_sync.py │ ├── nsxt_repo_sync_facts.py │ ├── nsxt_rest.py │ ├── nsxt_route_advertise.py │ ├── nsxt_transport_node_collections.py │ ├── nsxt_transport_node_collections_facts.py │ ├── nsxt_transport_node_profiles.py │ ├── nsxt_transport_node_profiles_facts.py │ ├── nsxt_transport_nodes.py │ ├── nsxt_transport_nodes_facts.py │ ├── nsxt_transport_zones.py │ ├── nsxt_transport_zones_facts.py │ ├── nsxt_upgrade_eula_accept.py │ ├── nsxt_upgrade_eula_accept_facts.py │ ├── nsxt_upgrade_groups.py │ ├── nsxt_upgrade_groups_facts.py │ ├── nsxt_upgrade_history.py │ ├── nsxt_upgrade_plan.py │ ├── nsxt_upgrade_plan_facts.py │ ├── nsxt_upgrade_postchecks.py │ ├── nsxt_upgrade_pre_post_checks_facts.py │ ├── nsxt_upgrade_prechecks.py │ ├── nsxt_upgrade_run.py │ ├── nsxt_upgrade_status_summary_facts.py │ ├── nsxt_upgrade_uc.py │ ├── nsxt_upgrade_uc_facts.py │ ├── nsxt_upgrade_upload_mub.py │ ├── nsxt_upgrade_upload_mub_facts.py │ ├── nsxt_uplink_profiles.py │ ├── nsxt_uplink_profiles_facts.py │ ├── nsxt_vidm.py │ ├── nsxt_virtual_ip.py │ ├── nsxt_virtual_ip_facts.py │ └── nsxt_vm_tags.py └── tests/ ├── playbooks/ │ ├── mp/ │ │ ├── answerfile.yml │ │ ├── answerfile_9x.yml │ │ ├── answerfile_attach_tnp_to_cluster.yml │ │ ├── answerfile_attach_tnp_to_cluster_9x.yml │ │ ├── answerfile_tn.yml │ │ ├── answerfile_tn_9x.yml │ │ ├── test_attach_tnp_to_cluster.yml │ │ ├── test_attach_tnp_to_cluster_9x.yml │ │ ├── test_basic_topology.yml │ │ ├── test_basic_topology_9x.yml │ │ ├── test_certificates.yml │ │ ├── test_certificates_facts.yml │ │ ├── test_cluster_profiles.yml │ │ ├── test_cluster_profiles_facts.yml │ │ ├── test_compute_managers.yml │ │ ├── test_compute_managers_facts.yml │ │ 
├── test_configure_transport_node.yml │ │ ├── test_configure_transport_node_9x.yml │ │ ├── test_edge_clusters.yml │ │ ├── test_edge_clusters_facts.yml │ │ ├── test_global_manager_enable_service.yml │ │ ├── test_global_manager_registration.yml │ │ ├── test_global_managers_active.yml │ │ ├── test_ip_blocks.yml │ │ ├── test_ip_blocks_facts.yml │ │ ├── test_ip_pools.yml │ │ ├── test_ip_pools_facts.yml │ │ ├── test_licenses.yml │ │ ├── test_licenses_facts.yml │ │ ├── test_local_manager_registration.yml │ │ ├── test_local_managers_compatibility.yml │ │ ├── test_local_managers_facts.yml │ │ ├── test_logical_ports.yml │ │ ├── test_logical_ports_facts.yml │ │ ├── test_logical_router_ports.yml │ │ ├── test_logical_router_ports_facts.yml │ │ ├── test_logical_router_static_route.yml │ │ ├── test_logical_routers.yml │ │ ├── test_logical_routers_facts.yml │ │ ├── test_logical_switches.yml │ │ ├── test_logical_switches_facts.yml │ │ ├── test_manager_auto_deployment.yml │ │ ├── test_manager_auto_deployment_facts.yml │ │ ├── test_manager_status.yml │ │ ├── test_ovf_deployment.yml │ │ ├── test_principal_identities.yml │ │ ├── test_principal_identities_facts.yml │ │ ├── test_repo_sync.yml │ │ ├── test_repo_sync_facts.yml │ │ ├── test_rest.yml │ │ ├── test_route_advertise.yml │ │ ├── test_transport_node_collections.yml │ │ ├── test_transport_node_collections_facts.yml │ │ ├── test_transport_node_profiles.yml │ │ ├── test_transport_node_profiles_facts.yml │ │ ├── test_transport_nodes.yml │ │ ├── test_transport_nodes_edge.yml │ │ ├── test_transport_nodes_edge_9x.yml │ │ ├── test_transport_nodes_facts.yml │ │ ├── test_transport_nodes_host.yml │ │ ├── test_transport_nodes_host_9x.yml │ │ ├── test_transport_zones.yml │ │ ├── test_transport_zones_facts.yml │ │ ├── test_upgrade_eula_accept.yml │ │ ├── test_upgrade_eula_accept_facts.yml │ │ ├── test_upgrade_groups.yml │ │ ├── test_upgrade_groups_facts.yml │ │ ├── test_upgrade_history_facts.yml │ │ ├── test_upgrade_plan.yml │ │ ├── 
test_upgrade_plan_facts.yml │ │ ├── test_upgrade_postchecks.yml │ │ ├── test_upgrade_pre_post_checks_facts.yml │ │ ├── test_upgrade_prechecks.yml │ │ ├── test_upgrade_run.yml │ │ ├── test_upgrade_status_summary_facts.yml │ │ ├── test_upgrade_uc.yml │ │ ├── test_upgrade_uc_facts.yml │ │ ├── test_upgrade_upload_mub.yml │ │ ├── test_upgrade_upload_mub_facts.yml │ │ ├── test_uplink_profiles.yml │ │ ├── test_uplink_profiles_9x.yml │ │ ├── test_uplink_profiles_facts.yml │ │ ├── test_vidm.yml │ │ ├── test_virtual_ip.yml │ │ └── test_virtual_ip_facts.yml │ ├── policy/ │ │ ├── test_nsxt_policy_bfd_profile.yaml │ │ ├── test_nsxt_policy_gateway_policy.yml │ │ ├── test_nsxt_policy_group.yml │ │ ├── test_nsxt_policy_ip_block.yml │ │ ├── test_nsxt_policy_ip_pool.yml │ │ ├── test_nsxt_policy_l2_bridge_ep_profile.yml │ │ ├── test_nsxt_policy_security_policy.yml │ │ ├── test_nsxt_policy_segment.yml │ │ ├── test_nsxt_policy_tier0.yml │ │ ├── test_nsxt_policy_tier1.yml │ │ └── test_vm_tags.yaml │ └── topologies/ │ ├── deploy_nsx_cluster/ │ │ ├── 01_deploy_first_node.yml │ │ ├── 02_configure_compute_manager.yml │ │ ├── 03_deploy_second_third_node.yml │ │ ├── 04_add_nsx_license.yml │ │ ├── README.md │ │ └── deploy_nsx_cluster_vars.yml │ ├── misc/ │ │ └── create_and_attach_t0_t1_routers.yml │ ├── policy_modules/ │ │ ├── 01_create_t0_gateway.yml │ │ ├── 02_create_t1_gateway.yml │ │ ├── 03_create_segments.yml │ │ ├── 04_create_groups.yml │ │ ├── 05_create_security_policy.yml │ │ ├── README.md │ │ ├── build_topology.yml │ │ ├── build_topology_vars.yml │ │ └── cleanup_topology.yml │ ├── setup_infra/ │ │ ├── 01_setup_transport_zones.yml │ │ ├── 01_setup_transport_zones_9x.yml │ │ ├── 02_setup_TEP_IP_Pools.yml │ │ ├── 03_setup_transport_node_profiles.yml │ │ ├── 03_setup_transport_node_profiles_9x.yml │ │ ├── 04_setup_transport_nodes.yml │ │ ├── 04_setup_transport_nodes_9x.yml │ │ ├── 05_setup_edge_cluster.yml │ │ ├── 06_setup_transport_node_collections.yml │ │ ├── README.md │ │ ├── 
setup_infra_vars.yml │ │ └── setup_infra_vars_9x.yml │ └── upgrade/ │ ├── 01_upgrade_upload_mub.yml │ ├── 02_upgrade_accept_eula.yml │ ├── 03_upgrade_uc.yml │ ├── 04_upgrade_update_plan.yml │ ├── 05_upgrade_update_groups.yml │ ├── 06_upgrade_prechecks.yml │ ├── 07_upgrade_run.yml │ ├── README.md │ ├── check_upgrade_groups_facts.yml │ ├── check_upgrade_pre_post_checks_facts.yml │ ├── check_upgrade_status_summary_facts.yml │ └── upgrade_vars.yml └── unit/ ├── __init__.py └── plugins/ ├── __init__.py └── module_utils/ ├── __init__.py ├── test_nsxt_base_resource.py └── test_policy_communicator.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ # build products... *.py[co] # Mac OS X stuff... .DS_Store # test output *.retry # JetBrains project dir .idea ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing to ansible-for-nsxt The ansible-for-nsxt project team welcomes contributions from the community. Before you start working with ansible-for-nsxt, please read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All contributions to this repository must be signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch. 
## Community ## Getting Started ## Contribution Flow This is a rough outline of what a contributor's workflow looks like: - Create a topic branch from where you want to base your work - Make commits of logical units - Make sure your commit messages are in the proper format (see below) - Push your changes to a topic branch in your fork of the repository - Submit a pull request Example: ``` shell git remote add upstream https://github.com/vmware/ansible-for-nsxt.git git checkout -b my-new-feature master git commit -a git push origin my-new-feature ``` ### Staying In Sync With Upstream When your branch gets out of sync with the vmware/master branch, use the following to update: ``` shell git checkout my-new-feature git fetch --all git pull --rebase upstream master git push --force-with-lease origin my-new-feature ``` ### Updating pull requests If your PR fails to pass CI or needs changes based on code review, you'll most likely want to squash these changes into existing commits. If your pull request contains a single commit or your changes are related to the most recent commit, you can simply amend the commit. ``` shell git add . git commit --amend git push --force-with-lease origin my-new-feature ``` If you need to squash changes into an earlier commit, you can use: ``` shell git add . git commit --fixup <commit> git rebase -i --autosquash master git push --force-with-lease origin my-new-feature ``` Be sure to add a comment to the PR indicating your new changes are ready to review, as GitHub does not generate a notification when you git push. ### Code Style ### Formatting Commit Messages We follow the conventions on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/). Be sure to include any related GitHub issue references in the commit message. See [GFM syntax](https://guides.github.com/features/mastering-markdown/#GitHub-flavored-markdown) for referencing issues and commits. 
## Reporting Bugs and Creating Issues When opening a new issue, try to roughly follow the commit message format conventions above. ## Repository Structure ================================================ FILE: LICENSE.txt ================================================ ansible-for-nsxt Copyright (c) 2018 VMware, Inc. All rights reserved SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only This code is Dual Licensed BSD 2-Clause or GPLv3 =============================================================================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================== GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. 
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. 
For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. 
To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . ================================================ FILE: README.md ================================================ # Ansible for NSX-T ## Overview This repository contains NSX-T Ansible Modules, which one can use with Ansible to work with [VMware NSX-T Data Center][vmware-nsxt]. [vmware-nsxt]: https://www.vmware.com/products/nsx.html For general information about Ansible, visit the [GitHub project page][an-github]. [an-github]: https://github.com/ansible/ansible These modules are maintained by [VMware](https://www.vmware.com/). Documentation on the NSX platform can be found at the [NSX-T Documentation page](https://docs.vmware.com/en/VMware-NSX-T/index.html) ## NSX Compatibility The following versions of NSX are supported: * NSX-T 4.2 * NSX-T 4.1 * NSX-T 4.0 * NSX-T 3.2 * NSX-T 3.1 * NSX-T 3.0 * NSX-T 2.5.1 ## Prerequisites Using Ansible-for-nsxt requires the following packages to be installed. Installation steps differ based on the platform (Mac/iOS, Ubuntu, Debian, CentOS, RHEL etc). Please follow the links below to pick the right platform. 
* Ansible >= 2.10.x [Ansible Installation Documentation](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) * Python3 >= 3.6.x [Python Documentation](https://www.python.org/downloads/) * pip3 >= 9.x Python Installation [PIP installation](https://pip.pypa.io/en/stable/installing/) * PyVmOmi - Python library for vCenter api. Installation via pip: [pyVmomi installation](https://pypi.org/project/pyvmomi/) * OVF Tools >= 4.4.x - Ovftool is used for ovf deployment [OVFTool Download and Installation](https://code.vmware.com/web/tool/4.4.0/ovf) ## Installation ansible-for-nsxt modules are distributed as Ansible Galaxy collection. Please use the following command to install it ``` ansible-galaxy collection install git+https://github.com/vmware/ansible-for-nsxt ``` Specify latest supported release branch ``` ansible-galaxy collection install git+https://github.com/vmware/ansible-for-nsxt.git,v3.2.0 ``` ## Usage Once installed, the modules can be directly run with ansible-playbook. For example, you can run: ``` ansible-playbook test_logical_switches.yml ``` The modules require you to provide details about how to authenticate with NSX-T. ### Using modules in the tests folder There are complete workflow example modules in the tests/playbooks folder. To use them, edit the corresponding vars file if required. Then run using ansible-playbook. For example, ``` ansible-playbook 01_create_t0_gateway.yml ``` ### Supported NSX Objects/Workflows The modules in this repository are focused on enabling automation of installation workflows of NSX-T. We have modules that support the legacy MP and new Policy API. #### MP API MP API modules can be used to configure an NSX resource with one-to-one mapping. ### Branch Information This repository has different branches with each branch providing support for up to a specific NSX-T release. 
Below is the list: * Master: Latest code, under development * v3.2.0: NSX-T 3.2.x and below * v3.0.1: NSX-T 3.1.x and below * v3.0.0: NSX-T 3.0.x and below * v1.1.0: NSX-T 2.4, NSX-T 2.5 * v1.0.0: NSX-T 2.3 ##### Deployment and installation modules * nsxt_deploy_ova * nsxt_licenses * nsxt_manager_status * nsxt_licenses_facts * nsxt_edge_clusters * nsxt_edge_clusters_facts * nsxt_fabric_compute_managers * nsxt_fabric_compute_managers_facts * nsxt_ip_pools * nsxt_ip_pools_facts * nsxt_uplink_profiles * nsxt_uplink_profiles_facts * nsxt_transport_zones * nsxt_transport_zones_facts * nsxt_transport_nodes * nsxt_transport_nodes_facts * nsxt_transport_node_collections * nsxt_transport_node_collections_facts * nsxt_transport_node_profiles * nsxt_transport_node_profiles_facts * nsxt_controller_manager_auto_deployment ###### Logical networking modules * nsxt_logical_ports * nsxt_logical_ports_facts * nsxt_logical_routers * nsxt_logical_routers_facts * nsxt_logical_router_ports * nsxt_logical_router_ports_facts * nsxt_logical_router_static_routes * nsxt_logical_switches * nsxt_logical_switches_facts * nsxt_ip_blocks * nsxt_ip_blocks_facts #### Policy API Policy API modules are aggregated such that logical constructs related to an NSX resource can be configured using a single playbook. They can be identified with prefix *nsxt_policy_*. The below list outlines the supported modules and the resources that can be configured through a module. Note that the Policy modules are supported only for NSX-T 3.0 and above. 1. Tier-0 Gateway (nsxt_policy_tier0) 1. Tier-0 Locale Services 2. Tier-0 Static Routes 3. Tier-0 Interface 4. Tier-0 BGP 5. Tier-0 BGP Neighbors 6. Tier-0 VRF 7. Tier-0 BFD Peers 2. Tier-1 Gateway (nsxt_policy_tier1) 1. Tier-1 Locale Services 2. Tier-1 Static Routes 3. Tier-1 Interface 3. Segment (nsxt_policy_segment) 1. Segment Port 4. Policy Group (nsxt_policy_group) 5. Security Policy and Firewall rules (nsxt_policy_security_policy) 6. 
IP Pools (nsxt_policy_ip_pool) 1. IP Address Pool Block Subnet 2. IP Address Pool Static Subnet 7. IP Blocks (nsxt_policy_ip_block) 8. BFD Profile (nsxt_policy_bfd_profile) 9. VM Tags (nsxt_vm_tags) 10. Gateway Policy (nsxt_policy_gateway_policy) 11. L2 Bridge Endpoint Profile (nsxt_policy_l2_bridge_ep_profile) Note that to add a new module in Policy API, its base class name should be added in the BASE_RESOURCES in module_utils/nsxt_base_resource.py ## Build & Run ### Install PyVmOmi ``` pip install --upgrade pyvmomi pyvim requests ssl ``` ### Download and Install Ovf tool 4.3 - [Ovftool](https://my.vmware.com/web/vmware/details?downloadGroup=OVFTOOL430&productId=742) (Note: Using ovftool version 4.0/4.1 causes OVA/OVF deployment failure with Error: cURL error: SSL connect error\nCompleted with errors\n) ### Authentication #### Using MP API Ansible-for-nsxt supports two types of authentication using MP API. 1. Basic server authentication 2. Certificate based authentication ##### Basic server authentication In basic server authentication, client has to explicitly provide NSX username and password for the NSX manager. The credentials have to be listed in ansible-playbook. ##### Certificate based authentication In certificate based authentication, client has to register their certificates to NSX manager using nsxt_certificates task. After registering the certificates, client has to create its own principal identity on NSX manager using nsxt_principal_identities task. The process of certificate registration and creation of principal identity has to be done using basic server authentication. Use test_certificates.yml and test_principal_identities.yml to match the values according to the client's environment. ``` ansible-playbook test_certificates.yml -vvv ansible-playbook test_principal_identities.yml -vvv ``` The path of the .p12 file i.e. the file containing public and private key has to be set to an environment variable named NSX_MANAGER_CERT_PATH. 
**Note:** Make sure NSX_MANAGER_CERT_PATH is set in the same remote host, where modules would be executed. ###### Generating certificates? Following commands can be used in order to generate certificates. ``` openssl req -newkey rsa:2048 -extensions usr_cert -nodes -keyout nsx_certificate.key -x509 -days 365 -out nsx_certificate.crt -subj "/C=US/ST=California/L=PaloAlto/O=VMware/CN=certauth-test" -sha256 openssl pkcs12 -export -out nsx_certificate.pfx -inkey nsx_certificate.key -in nsx_certificate.crt openssl pkcs12 -in nsx_certificate.pfx -out nsx_certificate.p12 -nodes ``` The nsx_certificate.crt file generated as output from the above command contains the public key certificate. The nsx_certificate.p12 file contains the public and private key generated. The path of nsx_certificate.p12 file has to be set in the environment variable NSX_MANAGER_CERT_PATH. Note: usr_cert tells OpenSSL to generate a client certificate. This must be defined in openssl.cnf. #### Validate CA in MP API To validate certificate authority (CA), set NSX_MANAGER_CA_PATH environment variable on Ansible control node pointing to CA certificate of NSX manager and pass validate_certs as ``True`` in ansible playbook. #### Using Policy API All the Policy API based Ansible Modules provide the following authentication mechanisms: ##### Basic Authentication This is the same as in MP API. It can be used by specifying the following fields in the playbook: 1. **username**: The username to authenticate with the NSX manager 2. **password**: The password to authenticate with the NSX manager For example: ```yaml - hosts: localhost tasks: - name: Update Tier0 nsxt_policy_tier0: hostname: "default" username: admin password: my-password validate_certs: False display_name: test-tier0-1 state: present ``` ##### Principal Identity There are 2 ways to consume the Principal Identity certificates. 
###### Using Environment variable This is the same as explained in the previous section: **Certificate based authentication** ###### Specifying in the playbook By specifying the following fields in the playbook: 1. **nsx_cert_path**: Path to the certificate created for the Principal Identity using which the CRUD operations should be performed. If the certificate is a .p12 file, only this attribute is required. Otherwise, *nsx_key_path* is also required. 2. **nsx_key_path**: Path to the certificate key created for the Principal Identity using which the CRUD operations should be performed For example: ```yaml - hosts: localhost tasks: - name: Update Tier0 nsxt_policy_tier0: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: False display_name: test-tier0-1 state: present ``` ##### vIDM When NSX-T is configured to use VMware Identity Manager (vIDM) for authentication, you can supply an Authorization header with an authentication type of *Remote*. The header content should consist of a base64-encoded string containing the username@domain and password separated by a single colon (":") character, as specified in RFC 1945 section 11.1. For example, to authenticate a request using the credentials of user jsmith@example.com with password Sk2LkPM!, include the following key:value pair under **request_headers** in the playbook: - Authorization: 'Remote anNtaXRoQGV4YW1wbGUuY29tOlNrMkxrUE0h' For example: ```yaml - hosts: localhost tasks: - name: Update Tier0 nsxt_policy_tier0: hostname: "default" request_headers: Authorization: 'Remote anNtaXRoQGV4YW1wbGUuY29tOlNrMkxrUE0h' validate_certs: False display_name: test-tier0-1 state: present ``` ##### SSL Verification You can use the flag *validate_certs* to perform SSL verification. You can also specify the path to a CA bundle using the parameter *ca_path* in the playbook. 
For example: ```yaml - hosts: localhost tasks: - name: Update Tier0 nsxt_policy_tier0: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle display_name: test-tier0-1 state: present ``` # Contributing The ansible-for-nsxt project team welcomes contributions from the community. Before you start working with ansible-for-nsxt, please read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All contributions to this repository must be signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch. For more detailed information, refer to [CONTRIBUTING.md](CONTRIBUTING.md). Please open a Pull-Request against the Master branch. # Support Released NSX-T Ansible modules are fully supported by VMware. The released modules are available in the specific numbered release branches: * v3.2.0 * v3.0.1 * v3.0.0 * v1.1.0 * v1.0.0 They are also available for download from VMware's download page. The *master* branch contains the latest development code which is community supported. For bugs and feature requests, please open a Github Issue and label it appropriately. # License Copyright (c) 2020 VMware, Inc. All rights reserved The NSX-T Ansible modules in this repository are available under the [BSD-2 license or GPLv3](LICENSE.txt), which applies to all parts of ansible-for-nsxt. You may not use them except in compliance with the License. 
================================================ FILE: galaxy.yml ================================================ namespace: vmware name: ansible_for_nsxt version: 1.0.0 readme: README.md authors: - Gautam Verma @ggverma (https://vmware.slack.com/archives/CTE293BSS) - Rahul Raghuvanshi @r-raghu (https://vmware.slack.com/archives/CTE293BSS) description: Ansible Modules for NSX-t license: - GPL-3.0-only - BSD-2-Clause-FreeBSD license_file: LICENSE.txt tags: [vmware, ansible, nsxt] dependencies: {} repository: https://github.com/vmware/ansible-for-nsxt documentation: https://github.com/vmware/ansible-for-nsxt homepage: https://github.com/vmware/ansible-for-nsxt issues: https://github.com/vmware/ansible-for-nsxt/issues build_ignore: [] ================================================ FILE: meta/runtime.yml ================================================ requires_ansible: ">=2.9" ================================================ FILE: plugins/doc_fragments/vmware_nsxt.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2021 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
class ModuleDocFragment(object): # VMware NSX-T documentation fragment DOCUMENTATION = """ options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. type: str password: description: - The password to authenticate with the NSX manager. - Must be specified if username is specified type: str ca_path: description: Path to the CA bundle to be used to verify host's SSL certificate type: str nsx_cert_path: description: Path to the certificate created for the Principal Identity using which the CRUD operations should be performed type: str nsx_key_path: description: - Path to the certificate key created for the Principal Identity using which the CRUD operations should be performed - Must be specified if nsx_cert_path is specified type: str request_headers: description: HTTP request headers to be sent to the host while making any request type: dict display_name: description: - Display name. - If resource ID is not specified, display_name will be used as ID. required: false type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true validate_certs: description: Enable server certificate verification. type: bool default: False tags: description: Opaque identifiers meaningful to the API user. type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str create_or_update_subresource_first: type: bool default: false description: - Can be used to create subresources first. - Can be specified for each subresource. delete_subresource_first: type: bool default: true description: - Can be used to delete subresources first. - Can be specified for each subresource. 
achieve_subresource_state_if_del_parent: type: bool default: false description: - Can be used to achieve the state of subresources even if the parent(base) resource's state is absent. - Can be specified for each subresource. do_wait_till_create: type: bool default: false description: - Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager. - Can be specified for each subresource. """ ================================================ FILE: plugins/module_utils/__init__.py ================================================ ================================================ FILE: plugins/module_utils/common_utils.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import time from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import request from ansible.module_utils._text import to_native import ipaddress def check_if_valid_ip(address): ''' params: - ip_address: IP Address in string format result: checks if the IP address is valid or not. 
''' try: ip = ipaddress.ip_address(address) if isinstance(ip, ipaddress.IPv4Address): ip_octets = address.split('.') valid_ip_octets = [int(ip_octet) for ip_octet in ip_octets] valid_ip_octets = [ip_octet for ip_octet in valid_ip_octets if ip_octet >= 0 and ip_octet<=255] return len(ip_octets) == 4 and len(valid_ip_octets) == 4 elif isinstance(ip, ipaddress.IPv6Address): return True except ValueError: return False def traverse_and_retrieve_value(object , attribute_list): ''' params: - object: Object where value is to be searched from attribute list - attribute_list: List to be used for searching attribute value ''' if object is None: return None for attribute in attribute_list: if object.__contains__(attribute): object = object[attribute] else: raise Exception('AttributeError: Attribute value \"%s\" not found ' 'while traversing.' % attribute) return object def get_attribute_from_endpoint(module, manager_url, endpoint, mgr_username, mgr_password, validate_certs, attribute_name, fail_on_error=True): ''' params: - endpoint: API endpoint. - attribute_name: Name of attribute whose value is required result: attribute value of the attribute name provided. ''' try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: if fail_on_error: module.fail_json(msg='Error while retrieving' ' %s. Error [%s]' % (attribute_name, to_native(err))) else: return None if resp.__contains__(attribute_name): return resp[attribute_name] return None def get_id_from_display_name_results(module, manager_url, endpoint, mgr_username, mgr_password, validate_certs, search_attribute_list, return_attribute_list, display_name, fail_module=True): ''' params: - endpoint: API endpoint. 
- search_attribute_list: List of name attribute the depth to be searched in the result object - return_attribute_list: List of name attribute the depth to be returned in the result object - display_name: The name to be matched - id_attribute: id_attribute whose value is to be returned ''' try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error while converting the passed name to' ' ID. Error [%s]' % to_native(err)) try: for result in resp['results']: if traverse_and_retrieve_value(result, search_attribute_list) == display_name: return traverse_and_retrieve_value(result, return_attribute_list) except Exception as err: module.fail_json(msg='Error while getting id from display name. Error [%s]' % to_native(err)) if fail_module: module.fail_json(msg='No id exist with display name %s' % display_name) else: return None def wait_for_operation_to_execute(manager_url, endpoint, mgr_username, mgr_password, validate_certs, attribute_list, desired_attribute_values, undesired_attribute_values, time_out=10800): ''' params: - endpoint: API endpoint. - attribute_list: The attribute whose value should become the desired attribute value - desired_attribute_value: The desired attribute value Function will wait till the attribute value derived from going deep to attribute list becomes equal to desired_attribute_value. 
''' operation_time = 0 while True: try: (rc, resp) = request(manager_url + endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: pass try: retrieved_value = traverse_and_retrieve_value(resp, attribute_list) if retrieved_value in desired_attribute_values: return None if retrieved_value in undesired_attribute_values: raise Exception(resp) except Exception as err: pass time.sleep(10) operation_time = operation_time + 10 if operation_time > time_out: raise Exception('Operation timed out.') def clean_and_get_params(args=None, extra_args_to_remove=[]): ''' params: - args: All the arguments to be removed ''' args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] args_to_remove.extend(extra_args_to_remove) for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_upgrade_orchestrator_node(module, mgr_hostname, mgr_username, mgr_password, headers, validate_certs): ''' params: - mgr_hostname: Any one of the manager node in manager cluster Returns the upgrade orchestrator node ''' try: (rc, resp) = request('https://%s/api/v1/node/services/install-upgrade' % mgr_hostname, headers=headers, url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(changed=True, msg='Error getting ip address of the upgrade' ' orchestrator node. Error: {}'.format(err)) return resp['service_properties']['enabled_on']; ================================================ FILE: plugins/module_utils/nsxt_base_resource.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.policy_communicator import PolicyCommunicator from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.policy_communicator import DuplicateRequestError from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native import sys if sys.version_info[0] < 3: raise Exception("Must be using Python 3") from abc import ABC, abstractmethod import time import json import inspect # Add all the base resources that can be configured in the # Policy API here. Required to infer base resource params. 
BASE_RESOURCES = {"NSXTSegment", "NSXTTier0", "NSXTTier1", "NSXTSecurityPolicy", "NSXTPolicyGroup", "NSXTIpBlock", "NSXTIpPool", "NSXTBFDProfile", "NSXTGatewayPolicy", "NSXTL2BridgeEpProfile"} class NSXTBaseRealizableResource(ABC): INCORRECT_ARGUMENT_NAME_VALUE = "error_invalid_parameter" def realize(self, supports_check_mode=True, successful_resource_exec_logs=[], baseline_arg_names=[], resource_params=None): # must call this method to realize the creation, update, or deletion of # resource self.resource_class = self.__class__ if not hasattr(self, "_arg_spec"): # Base resource self._make_ansible_arg_spec( supports_check_mode=supports_check_mode) if not hasattr(self, 'module'): self.module = AnsibleModule( argument_spec=self._arg_spec, supports_check_mode=supports_check_mode) self.set_baseline_args(baseline_arg_names) # Infer manager credentials mgr_hostname = self.module.params['hostname'] mgr_username = self.module.params['username'] mgr_password = self.module.params['password'] nsx_cert_path = self.module.params['nsx_cert_path'] nsx_key_path = self.module.params['nsx_key_path'] request_headers = self.module.params['request_headers'] ca_path = self.module.params['ca_path'] validate_certs = self.module.params['validate_certs'] # Each manager has an associated PolicyCommunicator self.policy_communicator = PolicyCommunicator.get_instance( mgr_hostname, mgr_username, mgr_password, nsx_cert_path, nsx_key_path, request_headers, ca_path, validate_certs) if resource_params is None: resource_params = self.module.params self.resource_params = resource_params self._state = self.get_attribute('state', resource_params) if not (hasattr(self, 'id') and self.id): if self.get_resource_name() in BASE_RESOURCES: self.id = self._get_id_using_attr_name( None, resource_params, self.get_resource_base_url(self.baseline_args), self.get_spec_identifier(), fail_if_not_found=False) else: self.id = self._get_id_using_attr_name( None, resource_params, 
self.get_resource_base_url(self._parent_info), self.get_spec_identifier(), fail_if_not_found=False) if self.id is None: self.id = self.infer_resource_id(self._parent_info) if self.id is None: self.module.fail_json( msg="Please specify either id or display_name for " "resource {}".format(str( self.get_spec_identifier()))) # Extract the resource params from module self.nsx_resource_params = self._extract_nsx_resource_params( resource_params) # parent_info is passed to subresources of a resource automatically if not hasattr(self, "_parent_info"): self._parent_info = {} self.update_parent_info(self._parent_info) try: # get existing resource schema _, self.existing_resource = self._send_request_to_API( suffix="/" + self.id, ignore_error=False, accepted_error_codes=set([404])) self.existing_resource_revision = self.existing_resource[ '_revision'] # As Policy API's PATCH requires all attributes to be filled, # we fill the missing resource params (the params not specified) # by user using the existing params self._fill_missing_resource_params( self.existing_resource, self.nsx_resource_params) except Exception as err: # the resource does not exist currently on the manager self.existing_resource = None self.existing_resource_revision = None finally: self._clean_none_resource_params( self.existing_resource, self.nsx_resource_params) self._achieve_state(resource_params, successful_resource_exec_logs) @classmethod def get_spec_identifier(cls): # Can be overriden in the subclass to provide different # unique_arg_identifier. It is used to infer which args belong to which # subresource. # By default, class name is used for subresources. 
return cls.get_resource_name() def get_state(self): return self._state def get_parent_info(self): return self._parent_info def infer_resource_id(self, parent_info): # This is called when the user has not specified the ID or # display_name of any child resource or its sub-resources pass @staticmethod @abstractmethod def get_resource_base_url(parent_info): # Must be overridden by the subclass raise NotImplementedError @staticmethod @abstractmethod def get_resource_spec(): # Must be overridden by the subclass raise NotImplementedError @classmethod def get_resource_name(cls): return cls.__name__ def create_or_update_subresource_first(self): # return True if subresource should be created/updated before parent # resource return self.resource_params.get( "create_or_update_subresource_first", False) def delete_subresource_first(self): # return True if subresource should be deleted before parent resource return self.resource_params.get("delete_subresource_first", True) def achieve_subresource_state_if_del_parent(self): # return True if this resource is to be realized with its own specified # state irrespective of the state of its parent resource. return self.resource_params.get( "achieve_subresource_state_if_del_parent", False) def do_wait_till_create(self): # By default, we do not wait for the parent resource to be created or # updated before its subresource is to be realized. return self.resource_params.get("do_wait_till_create", False) @staticmethod def get_resource_update_priority(): # this priority can be used to create/delete subresources # at the same level in a particular order. # by default, it returns 1 so the resources are created/updated/ # deleted in a fixed but random order. # should be overloaded in subclass to specify its priority. # for creation or update, we iterate in descending order. # for deletion, we iterate in ascending order. 
return 1 def set_arg_spec(self, arg_spec): self._arg_spec = arg_spec def set_ansible_module(self, ansible_module): self.module = ansible_module def set_parent_info(self, parent_info): self._parent_info = parent_info def achieve_subresource_state( self, resource_params, successful_resource_exec_logs): """ Achieve the state of each sub-resource. """ for sub_resource_class in self._get_sub_resources_class_of( self.resource_class): if sub_resource_class.allows_multiple_resource_spec(): children_resource_spec = (resource_params.get( sub_resource_class.get_spec_identifier()) or []) else: children_resource_spec = ([resource_params.get( sub_resource_class.get_spec_identifier())] or []) # Update the parent pointer my_parent = self._parent_info.get('_parent', '') self._update_parent_info() for resource_param_spec in children_resource_spec: if resource_param_spec is not None: sub_resource = sub_resource_class() sub_resource.set_arg_spec(self._arg_spec) sub_resource.set_ansible_module(self.module) sub_resource.set_parent_info(self._parent_info) sub_resource.realize( successful_resource_exec_logs=( successful_resource_exec_logs), resource_params=resource_param_spec) # Restore the parent pointer self._parent_info['_parent'] = my_parent def update_resource_params(self, nsx_resource_params): # Can be used to updates the params of resource before making # the API call. # Should be overridden in the subclass if needed pass def check_for_update(self, existing_params, resource_params): """ resource_params: dict existing_params: dict Compares the existing_params with resource_params and returns True if they are different. At a base level, it traverses the params and matches one-to-one. If the value to be matched is a - dict, it traverses that also. - list, it merely compares the order. Can be overriden in the subclass for specific custom checking. 
Returns true if the params differ """ if not existing_params: return False for k, v in resource_params.items(): if k not in existing_params: return True elif type(v).__name__ == 'dict': if self.check_for_update(existing_params[k], v): return True elif v != existing_params[k]: def compare_lists(list1, list2): # Returns True if list1 and list2 differ try: # If the lists can be converted into sets, do so and # compare lists as sets. set1 = set(list1) set2 = set(list2) return set1 != set2 except Exception: return True if type(v).__name__ == 'list': if compare_lists(v, existing_params[k]): return True continue return True return False def update_parent_info(self, parent_info): # Override this and fill in self._parent_info if that is to be passed # to the sub-resource # By default, parent's id is passed parent_info[self.get_spec_identifier() + "_id"] = self.id def get_attribute(self, attribute, resource_params): """ attribute: String resource_params: Parameters of the resource """ if (attribute == "state" and self.get_resource_name() not in BASE_RESOURCES): # if parent has absent state, subresources should have absent # state if . So, irrespective of what user specifies, if parent # is to be deleted, the child resources will be deleted. 
# override achieve_subresource_state_if_del_parent # in resource class to change this behavior if (self._parent_info["_parent"].get_state() == "absent" and not self.achieve_subresource_state_if_del_parent()): return "absent" return resource_params.get( attribute, self.INCORRECT_ARGUMENT_NAME_VALUE) def set_baseline_args(self, baseline_arg_names): # Can be overriden in subclass self.baseline_args = {} for baseline_arg_name in baseline_arg_names: self.baseline_args[baseline_arg_name] = self.module.params[ baseline_arg_name] def do_resource_params_have_attr_with_id_or_display_name(self, attr): if (attr + "_id" in self.nsx_resource_params or attr + "_display_name" in self.nsx_resource_params): return True return False def get_id_using_attr_name_else_fail(self, attr_name, params, resource_base_url, resource_type): return self._get_id_using_attr_name( attr_name, params, resource_base_url, resource_type, fail_if_not_found=True) def exit_with_failure(self, msg, **kwargs): self.module.fail_json(msg=msg, **kwargs) def skip_delete(self): """ Override in subclass if this resource is skipped to be deleted. Note that the children of this resource will still be deleted unless they override this method as well. """ return False @classmethod def is_required_in_spec(cls): """ Override in subclass if this resource is optional to be specified in the ansible playbook. """ return False @classmethod def allows_multiple_resource_spec(cls): """ Override in the resource class definition with False if only one resource can be associated with the parent. 
By default, we accept multiple """ return True def _get_id_using_attr_name(self, attr_name, params, resource_base_url, resource_type, fail_if_not_found=True): # Pass attr_name '' or None to infer base resource's ID id_identifier = 'id' display_name_identifier = 'display_name' if attr_name: id_identifier = attr_name + "_id" display_name_identifier = attr_name + "_display_name" if id_identifier in params and params[id_identifier]: return params.pop(id_identifier) if (display_name_identifier in params and params[display_name_identifier]): resource_display_name = params.pop(display_name_identifier) # Use display_name as ID if ID is not specified. return (self.get_id_from_display_name( resource_base_url, resource_display_name, resource_type, not fail_if_not_found) or resource_display_name) if fail_if_not_found: # Incorrect usage of Ansible Module self.module.fail_json( msg="Please specify either {} id or display_name for the " "resource {}".format(attr_name, str(resource_type))) def get_id_from_display_name(self, resource_base_url, resource_display_name, resource_type, ignore_not_found_error=True): try: # Get the id from the Manager (_, resp) = self._send_request_to_API( resource_base_url=resource_base_url) matched_resource = None for resource in resp: if (resource.__contains__('display_name') and resource['display_name'] == resource_display_name): if matched_resource is None: matched_resource = resource else: # Multiple resources with same display_name! # Ask the user to specify ID instead. self.module.fail_json( msg="Multiple {} found with display_name {}. 
" "Please specify the resource using id in " "the playbook.".format(resource_type, resource_display_name)) if matched_resource is not None: return matched_resource['id'] else: if ignore_not_found_error: return None else: # No resource found with this display_name self.module.fail_json( msg="No {} found with display_name {} for the " "specified configuration.".format( resource_type, resource_display_name)) except Exception as e: # Manager replied with invalid URL. It means that the resource # does not exist on the Manager. So, return the display_name return resource_display_name def _update_parent_info(self): # This update is always performed and should not be overriden by the # subresource's class self._parent_info["_parent"] = self def _make_ansible_arg_spec(self, supports_check_mode=True): """ We read the arg_spec of all the resources associated that are associated with this resource and create one complete arg_spec. """ if self.get_resource_name() in BASE_RESOURCES: self._arg_spec = {} # Update it with VMware arg spec self._arg_spec.update( PolicyCommunicator.get_vmware_argument_spec()) # ... then update it with top most resource spec ... 
self._update_arg_spec_with_resource( self.resource_class, self._arg_spec) # Update with all sub-resources arg spec for sub_resources_class in self._get_sub_resources_class_of( self.resource_class): self._update_arg_spec_with_all_resources( sub_resources_class, self._arg_spec) def _update_arg_spec_with_resource(self, resource_class, arg_spec): # updates _arg_spec with resource_class's arg_spec resource_arg_spec = self._get_base_arg_spec_of_resource() resource_arg_spec.update(self._get_base_arg_spec_of_nsx_resource()) resource_arg_spec.update(resource_class.get_resource_spec()) if resource_class.__name__ not in BASE_RESOURCES: arg_spec.update( { resource_class.get_spec_identifier(): dict( options=resource_arg_spec, required=resource_class.is_required_in_spec(), type='dict', ) }) if resource_class.allows_multiple_resource_spec(): arg_spec[resource_class.get_spec_identifier()]['type'] = 'list' arg_spec[resource_class.get_spec_identifier()]['elements'] = ( 'dict') else: arg_spec.update(resource_arg_spec) return resource_arg_spec def _update_arg_spec_with_all_resources(self, resource_class, arg_spec): # updates _arg_spec with resource_class's arg_spec and all it's # sub-resources resource_arg_spec = self._update_arg_spec_with_resource( resource_class, arg_spec) # go to each child of resource_class and update it for sub_resources_class in self._get_sub_resources_class_of( resource_class): self._update_arg_spec_with_all_resources( sub_resources_class, resource_arg_spec) def _get_base_arg_spec_of_nsx_resource(self): resource_base_arg_spec = {} resource_base_arg_spec.update( # these are the base args for any NSXT Resource display_name=dict( required=False, type='str' ), description=dict( required=False, type='str' ), tags=dict( required=False, type='list', elements='dict', options=dict( scope=dict( required=True, type='str' ), tag=dict( required=True, type='str' ) ) ) ) return resource_base_arg_spec def _get_base_arg_spec_of_resource(self): resource_base_arg_spec = {} 
resource_base_arg_spec.update( id=dict( type='str' ), state=dict( required=True, type='str', choices=['present', 'absent'] ), create_or_update_subresource_first=dict( default=False, type='bool' ), delete_subresource_first=dict( default=True, type='bool' ), achieve_subresource_state_if_del_parent=dict( default=False, type='bool' ), do_wait_till_create=dict( default=False, type='bool' ) ) return resource_base_arg_spec def _extract_nsx_resource_params(self, resource_params): # extract the params belonging to this resource only. filtered_params = {} def filter_with_spec(spec): for key in spec.keys(): if (key in resource_params and resource_params[key] is not None): filtered_params[key] = resource_params[key] filter_with_spec(self.get_resource_spec()) filter_with_spec(self._get_base_arg_spec_of_nsx_resource()) return filtered_params def _achieve_present_state(self, successful_resource_exec_logs): self.update_resource_params(self.nsx_resource_params) is_resource_updated = self.check_for_update( self.existing_resource, self.nsx_resource_params) if not is_resource_updated: # Either the resource does not exist or it exists but was not # updated in the YAML. if self.module.check_mode: successful_resource_exec_logs.append({ "changed": True, "debug_out": self.resource_params, "id": '12345', "resource_type": self.get_resource_name() }) return try: if self.existing_resource: # Resource already exists successful_resource_exec_logs.append({ "changed": False, "id": self.id, "message": "%s with id %s already exists." % (self.get_resource_name(), self.id), "resource_type": self.get_resource_name() }) return # Create a new resource _, resp = self._send_request_to_API( suffix="/" + self.id, method='PATCH', data=self.nsx_resource_params) if self.do_wait_till_create() and not self._wait_till_create(): raise Exception successful_resource_exec_logs.append({ "changed": True, "id": self.id, "body": str(resp), "message": "%s with id %s created." 
% (self.get_resource_name(), self.id), "resource_type": self.get_resource_name() }) except Exception as err: srel = successful_resource_exec_logs self.module.fail_json(msg="Failed to add %s with id %s." "Request body [%s]. Error[%s]." % (self.get_resource_name(), self.id, self.nsx_resource_params, to_native(err) ), successfully_updated_resources=srel) else: # The resource exists and was updated in the YAML. if self.module.check_mode: successful_resource_exec_logs.append({ "changed": True, "debug_out": self.resource_params, "id": self.id, "resource_type": self.get_resource_name() }) return self.nsx_resource_params['_revision'] = \ self.existing_resource['_revision'] try: _, patch_resp = self._send_request_to_API( suffix="/"+self.id, method="PATCH", data=self.nsx_resource_params) # Get the resource again and compare version numbers _, updated_resource_spec = self._send_request_to_API( suffix="/"+self.id, method="GET") if updated_resource_spec[ '_revision'] != self.existing_resource_revision: successful_resource_exec_logs.append({ "changed": True, "id": self.id, "body": str(patch_resp), "message": "%s with id %s updated." % (self.get_resource_name(), self.id), "resource_type": self.get_resource_name() }) else: successful_resource_exec_logs.append({ "changed": False, "id": self.id, "message": "%s with id %s already exists." % (self.get_resource_name(), self.id), "resource_type": self.get_resource_name() }) except Exception as err: srel = successful_resource_exec_logs self.module.fail_json(msg="Failed to update %s with id %s." "Request body [%s]. Error[%s]." 
% (self.get_resource_name(), self.id, self.nsx_resource_params, to_native( err) ), successfully_updated_resources=srel) def _achieve_absent_state(self, successful_resource_exec_logs): if self.skip_delete(): return if self.existing_resource is None: successful_resource_exec_logs.append({ "changed": False, "msg": 'No %s exist with id %s' % (self.get_resource_name(), self.id), "resource_type": self.get_resource_name() }) return if self.module.check_mode: successful_resource_exec_logs.append({ "changed": True, "debug_out": self.resource_params, "id": self.id, "resource_type": self.get_resource_name() }) return try: self._send_request_to_API(suffix="/" + self.id, method='DELETE') self._wait_till_delete() successful_resource_exec_logs.append({ "changed": True, "id": self.id, "message": "%s with id %s deleted." % (self.get_resource_name(), self.id) }) except Exception as err: srel = successful_resource_exec_logs self.module.fail_json(msg="Failed to delete %s with id %s. " "Error[%s]." % (self.get_resource_name(), self.id, to_native(err)), successfully_updated_resources=srel) def _send_request_to_API(self, suffix="", ignore_error=False, method='GET', data=None, resource_base_url=None, accepted_error_codes=set()): try: if not resource_base_url: if self.get_resource_name() not in BASE_RESOURCES: resource_base_url = (self.resource_class. get_resource_base_url( parent_info=self._parent_info)) else: resource_base_url = (self.resource_class. 
get_resource_base_url( baseline_args=self.baseline_args)) if not suffix: rc, resp = self.policy_communicator.get_all_results( resource_base_url, ignore_errors=ignore_error) else: rc, resp = self.policy_communicator.request( resource_base_url + suffix, ignore_errors=ignore_error, method=method, data=data) return rc, resp except DuplicateRequestError: self.module.fail_json(msg='Duplicate request') except Exception as e: if (e.args[0] not in accepted_error_codes and self.get_resource_name() in BASE_RESOURCES): msg = ('Received {} from NSX Manager. Please try ' 'again. '.format(e.args[0])) if len(e.args) == 2 and e.args[1] and ( 'error_message' in e.args[1]): msg += e.args[1]['error_message'] self.module.fail_json(msg=msg) raise e def get_all_resources_from_nsx(self): rc, resp = self._send_request_to_API() if rc != 200: self.module.fail_json( "Invalid URL to retrieve all configured {} NSX " "resources".format(self.get_spec_identifier())) return resp def _achieve_state(self, resource_params, successful_resource_exec_logs=[]): """ Achieves `present` or `absent` state as specified in the YAML. """ if self.id == self.INCORRECT_ARGUMENT_NAME_VALUE: # The resource was not specified in the YAML. # So, no need to realize it. 
return if (self._state == "present" and self.create_or_update_subresource_first()): self.achieve_subresource_state( resource_params, successful_resource_exec_logs) if self._state == "absent" and self.delete_subresource_first(): self.achieve_subresource_state( resource_params, successful_resource_exec_logs) if self._state == 'present': self._achieve_present_state( successful_resource_exec_logs) else: self._achieve_absent_state(successful_resource_exec_logs) if self._state == "present" and not ( self.create_or_update_subresource_first()): self.achieve_subresource_state( resource_params, successful_resource_exec_logs=successful_resource_exec_logs) if self._state == "absent" and not self.delete_subresource_first(): self.achieve_subresource_state( resource_params, successful_resource_exec_logs) if self.get_resource_name() in BASE_RESOURCES: changed = False for successful_resource_exec_log in successful_resource_exec_logs: if successful_resource_exec_log["changed"]: changed = True break srel = successful_resource_exec_logs self.module.exit_json(changed=changed, successfully_updated_resources=srel) def _get_sub_resources_class_of(self, resource_class): subresources = [] for attr in resource_class.__dict__.values(): if (inspect.isclass(attr) and issubclass(attr, NSXTBaseRealizableResource)): subresources.append(attr) if hasattr(self, "_state") and self._state == "present": subresources.sort(key=lambda subresource: subresource().get_resource_update_priority(), reverse=True) else: subresources.sort(key=lambda subresource: subresource().get_resource_update_priority(), reverse=False) for subresource in subresources: yield subresource def _wait_till_delete(self): """ Periodically checks if the resource still exists on the API server every 10 seconds. Returns after it has been deleted. 
""" while True: try: self._send_request_to_API( suffix="/" + self.id, accepted_error_codes=set([404])) time.sleep(10) except DuplicateRequestError: self.module.fail_json(msg='Duplicate request') except Exception: return def _wait_till_create(self): FAILED_STATES = ["failed"] IN_PROGRESS_STATES = ["pending", "in_progress"] SUCCESS_STATES = ["partial_success", "success"] try: count = 0 while True: rc, resp = self._send_request_to_API( suffix="/" + self.id, accepted_error_codes=set([404])) if 'state' in resp: if any(resp['state'] in progress_status for progress_status in IN_PROGRESS_STATES): time.sleep(10) count = count + 1 if count == 90: # Wait for max 15 minutes for host to realize return False elif any(resp['state'] in progress_status for progress_status in SUCCESS_STATES): return True else: # Failed State return False else: if rc != 200: time.sleep(1) count = count + 1 if count == 90: # Wait for max 15 minutes for host to realize return False else: return True except Exception as err: return False def _fill_missing_resource_params(self, existing_params, resource_params): """ resource_params: dict existing_params: dict Fills resource_params with the key:value from existing_params if missing in the former. 
""" if not existing_params: return for k, v in existing_params.items(): if k not in resource_params: resource_params[k] = v elif type(v).__name__ == 'dict': self._fill_missing_resource_params(v, resource_params[k]) def _clean_none_resource_params(self, existing_params, resource_params): keys_to_remove = [] for k, v in resource_params.items(): if v is None and ( existing_params is None or k not in existing_params): keys_to_remove.append(k) for key in keys_to_remove: resource_params.pop(key) for k, v in resource_params.items(): if type(v).__name__ == 'dict': self._clean_none_resource_params(existing_params, v) ================================================ FILE: plugins/module_utils/nsxt_resource_urls.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# Hidden or not exposed URLS _SITE_URL = '/infra/sites' _DOMAIN_URL = '/infra/domains' _ENFORCEMENT_POINT_URL = _SITE_URL + '/{}/enforcement-points' IP_BLOCK_URL = '/infra/ip-blocks' IP_POOL_URL = '/infra/ip-pools' IP_ADDRESS_POOL_SUBNET_URL = IP_POOL_URL + '/{}/ip-subnets' POLICY_GROUP_URL = _DOMAIN_URL + '/{}/groups' SECURITY_POLICY_URL = _DOMAIN_URL + '/{}/security-policies' SEGMENT_URL = '/infra/segments' SEGMENT_PORT_URL = SEGMENT_URL + '/{}/ports' TRANSPORT_ZONE_URL = _ENFORCEMENT_POINT_URL + '/{}/transport-zones' L2_BRIDGE_EP_PROFILE_URL = _ENFORCEMENT_POINT_URL + '/{}/edge-bridge-profiles' TIER_0_URL = '/infra/tier-0s' TIER_0_STATIC_ROUTE_URL = TIER_0_URL + '/{}/static-routes' TIER_0_LOCALE_SERVICE_URL = TIER_0_URL + '/{}/locale-services' TIER_0_LS_INTERFACE_URL = TIER_0_LOCALE_SERVICE_URL + '/{}/interfaces' TIER_0_BGP_NEIGHBOR_URL = TIER_0_LOCALE_SERVICE_URL + '/{}/bgp/neighbors' TIER_0_BFD_PEERS = TIER_0_STATIC_ROUTE_URL + '/bfd-peers' TIER_1_URL = '/infra/tier-1s' TIER_1_STATIC_ROUTE_URL = TIER_1_URL + '/{}/static-routes' TIER_1_LOCALE_SERVICE_URL = TIER_1_URL + '/{}/locale-services' TIER_1_LS_INTERFACE_URL = TIER_1_LOCALE_SERVICE_URL + '/{}/interfaces' TIER_1_BGP_NEIGHBOR_URL = TIER_1_LOCALE_SERVICE_URL + '/{}/bgp/neighbors' IPV6_DAD_PROFILE_URL = '/infra/ipv6-dad-profiles' IPV6_NDRA_PROFILE_URL = '/infra/ipv6-ndra-profiles' DHCP_RELAY_CONFIG_URL = '/infra/dhcp-relay-configs' EDGE_CLUSTER_URL = _ENFORCEMENT_POINT_URL + '/{}/edge-clusters' EDGE_NODE_URL = EDGE_CLUSTER_URL + '/{}/edge-nodes' VM_LIST_URL = '/virtual-machines' VM_UPDATE_URL = '/virtual-machines' BFD_PROFILE_URL = '/infra/bfd-profiles' GATEWAY_POLICY_URL = _DOMAIN_URL + '/{}/gateway-policies' ================================================ FILE: plugins/module_utils/policy_communicator.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import json import hashlib from ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import get_certificate_file_path from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import is_json import six.moves.urllib.parse as urlparse class PolicyCommunicator: __instances = dict() @staticmethod def check_for_authorization_header(request_headers): if 'Authorization' in request_headers: return True return False @staticmethod def get_instance(mgr_hostname, mgr_username=None, mgr_password=None, nsx_cert_path=None, nsx_key_path=None, request_headers={}, ca_path=None, validate_certs=True): """ Returns an instance of PolicyCommunicator associated with (mgr_hostname, mgr_username, mgr_password) or (mgr_hostname, nsx_cert_path, nsx_key_path) """ if mgr_username is not None: if mgr_password is None: raise InvalidInstanceRequest("mgr_password ") key = tuple([mgr_hostname, mgr_username, mgr_password]) elif nsx_cert_path is not None: if not nsx_cert_path.endswith('.p12') and nsx_key_path is None: 
raise InvalidInstanceRequest("nsx_key_path") key = tuple([mgr_hostname, nsx_cert_path, nsx_key_path]) elif get_certificate_file_path('NSX_MANAGER_CERT_PATH') is not None: nsx_cert_path = get_certificate_file_path('NSX_MANAGER_CERT_PATH') key = tuple([mgr_hostname, nsx_cert_path]) elif PolicyCommunicator.check_for_authorization_header( request_headers): key = tuple([request_headers['Authorization']]) else: raise InvalidInstanceRequest("(mgr_username, mgr_password) or" "(nsx_cert_path, nsx_key_path), or " "environment variable " "'NSX_MANAGER_CERT_PATH'") if key not in PolicyCommunicator.__instances: PolicyCommunicator(key, mgr_hostname, mgr_username, mgr_password, nsx_cert_path, nsx_key_path, request_headers, ca_path, validate_certs) return PolicyCommunicator.__instances.get(key) def __init__(self, key, mgr_hostname, mgr_username, mgr_password, nsx_cert_path, nsx_key_path, request_headers, ca_path, validate_certs): if key in PolicyCommunicator.__instances: raise Exception("The associated PolicyCommunicator is" " already present! 
Please use getInstance to" " retrieve it.") else: self.use_basic_auth = False if mgr_username is not None: self.use_basic_auth = True self.mgr_username = mgr_username self.mgr_password = mgr_password self.nsx_cert_path = nsx_cert_path self.nsx_key_path = nsx_key_path self.request_headers = request_headers or {} self.request_headers.update({ 'Accept': 'application/json', 'Content-Type': 'application/json'}) self.ca_path = ca_path self.validate_certs = validate_certs self.policy_url = 'https://{}/policy/api/v1'.format(mgr_hostname) self.fabric_url = 'https://{}/api/v1/fabric'.format(mgr_hostname) self.active_requests = set() PolicyCommunicator.__instances[key] = self @staticmethod def get_vmware_argument_spec(): return dict( hostname=dict(type='str', required=True), username=dict(type='str', required=False), password=dict(type='str', required=False, no_log=True), port=dict(type='int', default=443), validate_certs=dict(type='bool', required=False, default=True), nsx_cert_path=dict(type='str', required=False), nsx_key_path=dict(type='str', required=False), request_headers=dict(type='dict'), ca_path=dict(type='str') ) def get_all_results(self, url, ignore_errors=False): NULL_CURSOR_PREFIX = '0000' rc, concatenate_response = self.request( url, ignore_errors=ignore_errors) if rc != 200: return rc, None cursor = concatenate_response.get('cursor', NULL_CURSOR_PREFIX) op = '&' if urlparse.urlparse(url).query else '?' 
url += op + 'cursor=' while cursor and not cursor.startswith(NULL_CURSOR_PREFIX): rc, page = self.request(url + cursor, ignore_errors) if rc != 200: return rc, None concatenate_response['results'].extend(page.get('results', [])) cursor = page.get('cursor', NULL_CURSOR_PREFIX) return rc, concatenate_response['results'] def request(self, url, data=None, method='GET', use_proxy=True, force=False, last_mod_time=None, timeout=300, http_agent=None, ignore_errors=False, base_url='policy'): if base_url == 'policy': # prepend the policy url # this is the default behavior if base_url is not specified url = self.policy_url + url elif base_url == 'fabric': # prepend the fabric url url = self.fabric_url + url else: raise Exception("invalid base_url specified in request call") # create a request ID associated with this request request_id = self._get_request_id(url, data, method) if self.register_request(request_id): # new request try: # connect to the API server if data is not None: data = json.dumps(data) response = open_url(url=url, data=data, headers=self.request_headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=self.validate_certs, url_username=self.mgr_username, url_password=self.mgr_password, http_agent=http_agent, force_basic_auth=self.use_basic_auth, client_cert=self.nsx_cert_path, client_key=self.nsx_key_path, ca_path=self.ca_path) except HTTPError as err: response = err resp_code = response.getcode() resp_raw_data = response.read().decode('utf-8') # request completed by the server self.active_requests.remove(request_id) try: resp_data = resp_raw_data # infer the response if resp_raw_data and is_json(resp_raw_data): resp_data = json.loads(resp_raw_data) except Exception as e: if not ignore_errors: raise Exception(resp_code, resp_raw_data) # return the approprate response code and data if resp_code >= 400 and not ignore_errors: raise Exception(resp_code, resp_data) if resp_data is not None and 
'error_code' in resp_data: raise Exception(resp_data['error_code'], resp_data) else: return resp_code, resp_data else: raise DuplicateRequestError def _get_request_id(self, url, data=None, method='GET'): """ Creates a hash from url, data, and method that can be used as a request ID. """ request = dict() request["data"] = data request['request_url'] = url request['request_method'] = method return hashlib.sha256( json.dumps(request, sort_keys=True).encode('utf-8')).hexdigest() def register_request(self, request_id): """ This creates a hash from URL and data and stores it in a cache. If a same hash is created, the request is identified as a duplicate and it returns False. Otherwise, returns True. """ if request_id in self.active_requests: return False self.active_requests.add(request_id) return True class DuplicateRequestError(Exception): pass class InvalidInstanceRequest(Exception): message = "Invalid instance Request, missing {}" def __init__(self, missing_fields): super(Exception, self).__init__(self.message.format(missing_fields)) ================================================ FILE: plugins/module_utils/policy_resource_specs/__init__.py ================================================ ================================================ FILE: plugins/module_utils/policy_resource_specs/l2_bridge_ep_profile.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SPEC = dict( edge_nodes_info=dict( required=True, type='list', elements='dict', options=dict( # Note that only default site_id and # enforcementpoint_id are used site_id=dict( type='str', default="default" ), enforcementpoint_id=dict( type='str', default="default" ), edge_cluster_id=dict( type='str' ), edge_cluster_display_name=dict( type='str' ), edge_node_id=dict( type='str' ), edge_node_display_name=dict( type='str' ) ) ), enforcementpoint_id=dict( type='str', default="default" ), failover_mode=dict( required=False, default="PREEMPTIVE", choices=["PREEMPTIVE", "NON_PREEMPTIVE"], type='str' ), ha_mode=dict( required=False, type='str', default="ACTIVE_STANDBY", choices=["ACTIVE_STANDBY"] ), site_id=dict( type='str', default="default" ), ) ================================================ FILE: plugins/module_utils/policy_resource_specs/security_policy.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Ansible argument spec for the nsxt_policy_security_policy module.
# Keys mirror the NSX-T Policy SecurityPolicy API fields; `rules` describes
# the nested firewall rule list.
SPEC = dict(
    category=dict(
        required=False,
        type='str'
    ),
    comments=dict(
        required=False,
        type='str'
    ),
    connectivity_strategy=dict(
        required=False,
        type='str',
        choices=['WHITELIST', 'BLACKLIST', 'WHITELIST_ENABLE_LOGGING',
                 'BLACKLIST_ENABLE_LOGGING', 'NONE']
    ),
    domain_id=dict(
        required=False,
        type='str',
        default="default"
    ),
    locked=dict(
        required=False,
        type='bool'
    ),
    scheduler_path=dict(
        required=False,
        type='str'
    ),
    scope=dict(
        required=False,
        type='list'
    ),
    sequence_number=dict(
        required=False,
        type='int'
    ),
    stateful=dict(
        required=False,
        type='bool'
    ),
    # Firewall rules contained in this policy.
    rules=dict(
        required=False,
        type='list',
        elements='dict',
        options=dict(
            action=dict(
                required=True,
                type='str',
                choices=["ALLOW", "DROP", "REJECT"]
            ),
            description=dict(
                required=False,
                type='str'
            ),
            destination_groups=dict(
                required=True,
                type='list'
            ),
            destinations_excluded=dict(
                required=False,
                type='bool',
                default=False
            ),
            direction=dict(
                required=False,
                default="IN_OUT",
                type='str',
                choices=["IN_OUT", "IN", "OUT"]
            ),
            disabled=dict(
                required=False,
                type='bool',
                default=False
            ),
            display_name=dict(
                type='str'
            ),
            id=dict(
                type='str'
            ),
            ip_protocol=dict(
                type='str',
                choices=['IPV4', 'IPV6', 'IPV4_IPV6']
            ),
            logged=dict(
                type='bool',
                default=False
            ),
            notes=dict(
                type='str'
            ),
            profiles=dict(
                type='list',
                elements='str'
            ),
            scope=dict(
                type='list',
                elements='str'
            ),
            sequence_number=dict(
                required=False,
                type='int'
            ),
            service_entries=dict(
                type='list',
                elements='dict'
            ),
            services=dict(
                required=True,
                type='list'
            ),
            source_groups=dict(
                required=True,
                type='list'
            ),
            sources_excluded=dict(
                required=False,
                type='bool',
                default=False
            ),
            tag=dict(
                type='str'
            ),
            tags=dict(
                type='list',
                elements='dict',
                options=dict(
                    scope=dict(
                        type='str'
                    ),
                    tag=dict(
                        type='str'
                    )
                )
            ),
        )
    ),
    tcp_strict=dict(
        required=False,
        type='bool'
    )
)



================================================
FILE: plugins/module_utils/upgrade_reverse_order.py
================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function import time from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import request from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.common_utils import get_upgrade_orchestrator_node from ansible.module_utils._text import to_native UPGRADE_API = '/upgrade/plan?action=upgrade' UPGRADE_STATUS_SUMMARY_API = '/upgrade/status-summary' MP_UPGRADE_DONE = False def check_upgrade_status_at_start(module, manager_url, mgr_username, mgr_password, validate_certs): global MP_UPGRADE_DONE endpoint = "/upgrade/upgrade-unit-groups?sync=true" call_get_sync(manager_url, endpoint, mgr_username, mgr_password, validate_certs) upgrade_status_summary = get_upgrade_status_summary(module, manager_url, mgr_username, mgr_password, validate_certs , False) overall_upgrade_status = upgrade_status_summary['overall_upgrade_status'] if overall_upgrade_status == 'PAUSED' : if upgrade_status_summary['component_status'][0]['status'] == 'SUCCESS': MP_UPGRADE_DONE = True return overall_upgrade_status def get_upgrade_status_summary(module, manager_url, mgr_username, mgr_password, validate_certs , ignore_errors): ''' Get the upgrade status summary ''' try: (rc, resp) = request(manager_url+ UPGRADE_STATUS_SUMMARY_API, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=ignore_errors) except Exception as err: if ignore_errors: return None else: module.fail_json(msg='Error while triggering api:' ' %s. 
Error [%s]' % (manager_url+ UPGRADE_STATUS_SUMMARY_API, to_native(err))) return resp def call_get_sync(managerUrl, endpoint, mgrUsername, mgrPassword, validateCerts): request(managerUrl + endpoint, method='GET', url_username=mgrUsername, url_password=mgrPassword, validate_certs=validateCerts, ignore_errors=True) def check_component_upgrade_completion_status(module, manager_url, mgr_username, mgr_password, validate_certs): global MP_UPGRADE_DONE count_upgrade_status_api_no_resp = 0 component_upgrade_start_time = time.time() while True: upgrade_status_summary = get_upgrade_status_summary(module, manager_url, mgr_username, mgr_password, validate_certs , True) if upgrade_status_summary == None: count_upgrade_status_api_no_resp +=1 elif upgrade_status_summary.__contains__('overall_upgrade_status'): overall_upgrade_status = upgrade_status_summary['overall_upgrade_status'] if overall_upgrade_status == 'SUCCESS': module.exit_json(changed=True, message='System has been upgraded successfully!!!') elif overall_upgrade_status == 'PAUSED': check_component_statuses(module , upgrade_status_summary['component_status']) if not MP_UPGRADE_DONE: MP_UPGRADE_DONE = True return elapsed_time = time.time() - component_upgrade_start_time if MP_UPGRADE_DONE and count_upgrade_status_api_no_resp >= 5: module.fail_json(msg='Error while triggering api:' ' %s. 
API failed 5 times' %UPGRADE_STATUS_SUMMARY_API) elif not MP_UPGRADE_DONE and elapsed_time > 3600 : module.fail_json(msg='MP component upgrade took longer than 1hr, System upgrade failed') time.sleep(30) def check_component_statuses(module, component_status_list): for component_status in component_status_list: if component_status['status'] == 'FAILED': module.fail_json(msg='Failed to upgrade system as Component : %s' 'has Status : %s ' %(component_status['component_type'], component_status['status'])) def execute_upgrade(module, manager_url, mgr_username, mgr_password, validate_certs): global MP_UPGRADE_DONE headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' while True: try: (rc, resp) = request(manager_url+ UPGRADE_API, data='', headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=False) except Exception as err: module.fail_json(msg="Failed while upgrading component") time.sleep(30) check_component_upgrade_completion_status(module, manager_url, mgr_username, mgr_password, validate_certs) def trigger_upgrade_reverse_order(module, mgr_hostname, mgr_username, mgr_password, validate_certs): headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' mgr_hostname = get_upgrade_orchestrator_node(module, mgr_hostname, mgr_username, mgr_password, headers, validate_certs) manager_url = 'https://{}/api/v1'.format(mgr_hostname) upgrade_status = check_upgrade_status_at_start(module, manager_url, mgr_username, mgr_password, validate_certs) if upgrade_status == 'IN_PROGRESS' or upgrade_status == 'PAUSING': module.fail_json(msg='Upgrade is in state: %s, can\'t continue' % upgrade_status) elif upgrade_status == 'SUCCESS': module.exit_json(changed=False, message='Upgrade state is SUCCESS. 
No need to' ' continue.') elif upgrade_status == 'NOT_STARTED' or upgrade_status == 'PAUSED': execute_upgrade(module, manager_url, mgr_username, mgr_password, validate_certs) ================================================ FILE: plugins/module_utils/vcenter_utils.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import ssl import requests import atexit try: from pyvim import connect except: from pyVim import connect from pyVmomi import vmodl from pyVmomi import vim def establish_vcenter_connection(module, vCenter_host, username, password, ignore_ssl_verification): """ params: - vCenter_host: vCenter host IP - username: vCenter username - password: vCenter password result: Retrieves vCenter information from service instance and returns as content object. 
""" if ignore_ssl_verification is False: try: sslContext = ssl.SSLContext(ssl.PROTOCOL_SSLv23) sslContext.verify_mode = ssl.CERT_NONE service_instance = connect.SmartConnect(host=vCenter_host, user=username, pwd=password, port=443, sslContext=sslContext) if not service_instance: module.fail_json(msg="Could not connect to the specified vCenter " "host using specified username and password") atexit.register(connect.Disconnect, service_instance) except vmodl.MethodFault as error: module.fail_json(msg="Caught vmodl fault while connecting to vCenter: " + error.msg) else: try: service_instance = connect.SmartConnect(host=vCenter_host, user=username, pwd=password, port=443) if not service_instance: module.fail_json(msg="Could not connect to the specified vCenter " "host using specified username and password") atexit.register(connect.Disconnect, service_instance) except (requests.ConnectionError, ssl.SSLError): try: sslContext = ssl.SSLContext(ssl.PROTOCOL_SSLv23) sslContext.verify_mode = ssl.CERT_NONE service_instance = connect.SmartConnect(host=vCenter_host, user=username, pwd=password, port=443, sslContext=sslContext) if not service_instance: module.fail_json(msg="Could not connect to the specified vCenter " "host using specified username and password") atexit.register(connect.Disconnect, service_instance) except vmodl.MethodFault as error: module.fail_json(msg="Caught vmodl fault while connecting to vCenter: " + error.msg) return service_instance.RetrieveContent() def get_resource_id_from_name(module, vCenter_host, username, password, resource_type, resource_name, ignore_ssl_verification): """ params: - resource_type: Type of vCenter resource. Accepted values 'host', 'cluster', 'storage' and 'network'. - resource_name: Name of the resource. result: - moref id of the resource name and type given. 
""" try: content = establish_vcenter_connection(module, vCenter_host, username, password, ignore_ssl_verification) if resource_type == 'host': objview = content.viewManager.CreateContainerView(content.rootFolder, [vim.HostSystem], True) elif resource_type == 'cluster': objview = content.viewManager.CreateContainerView(content.rootFolder, [vim.ClusterComputeResource], True) elif resource_type == 'storage': objview = content.viewManager.CreateContainerView(content.rootFolder, [vim.Datastore], True) elif resource_type == 'network': objview = content.viewManager.CreateContainerView(content.rootFolder, [vim.Network], True) else: module.fail_json(msg='Resource type provided by user either doesn\'t' ' exist or is not supported') all_resources = objview.view objview.Destroy() for resource in all_resources: if resource.name == resource_name: return resource._moId module.fail_json(msg='%s doesnt exist in %s' % (resource_name, resource_type)) except vmodl.MethodFault as error: print("Caught vmodl fault while fetching info from vCenter: " + error.msg) return -1 def get_data_network_id_from_name(module, vCenter_host, username, password, data_network_name_list, ignore_ssl_verification): """ params: - data_network_name_list: List of data network names result: list of data network ids. 
""" try: content = establish_vcenter_connection(module, vCenter_host, username, password, ignore_ssl_verification) objview = content.viewManager.CreateContainerView(content.rootFolder, [vim.Network], True) all_networks = objview.view objview.Destroy() network_dict = {} for network in all_networks: network_dict[network.name] = network._moId data_network_id_list = [] for data_network_name in data_network_name_list: if data_network_name in network_dict: data_network_id_list.append(str(network_dict[data_network_name])) else: module.fail_json(msg='data network %s doesnt exist in the available' 'list of networks' % data_network_name) return data_network_id_list except vmodl.MethodFault as error: print("Caught vmodl fault while fetching info from vCenter: " + error.msg) ================================================ FILE: plugins/module_utils/vmware_nsxt.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import json, os, re from ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils._text import to_native import six.moves.urllib.parse as urlparse def vmware_argument_spec(): return dict( hostname=dict(type='str', required=True), username=dict(type='str', required=False), password=dict(type='str', required=False, no_log=True), port=dict(type='int', default=443), validate_certs=dict(type='bool', required=False, default=True), ) def request(url, data=None, headers=None, method='GET', use_proxy=True, force=False, last_mod_time=None, timeout=300, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): ''' The main function which hits the request to the manager. Username and password are given the topmost priority. In case username and password are not provided if the environment variable is set. Authentication fails if the details are not correct. ''' if url_username is None or url_password is None: force_basic_auth = False client_cert = get_certificate_file_path('NSX_MANAGER_CERT_PATH') if client_cert is None: raise Exception('It seems that either you have not passed your username password correctly or ' 'your path for NSX_MANAGER_CERT_PATH is not set correctly.') else: client_cert = None if method == 'GET': return get_all_results( url, data, headers, method, use_proxy, force, last_mod_time, timeout, validate_certs, url_username, url_password, http_agent, force_basic_auth, ignore_errors, client_cert) return _request( url, data, headers, method, use_proxy, force, last_mod_time, timeout, validate_certs, url_username, url_password, http_agent, force_basic_auth, ignore_errors, client_cert) def get_all_results( url, data, headers, method, use_proxy, force, last_mod_time, timeout, validate_certs, url_username, url_password, http_agent, force_basic_auth, ignore_errors, client_cert): rc, resp = _request( url, data, headers, method, 
use_proxy, force, last_mod_time, timeout, validate_certs, url_username, url_password, http_agent, force_basic_auth, ignore_errors, client_cert) if rc != 200: return rc, None cursor = resp.get('cursor') op = '&' if urlparse.urlparse(url).query else '?' url += op + 'cursor=' NULL_CURSOR_PREFIX = '0000' while cursor and not cursor.startswith(NULL_CURSOR_PREFIX): rc, page = _request( url + cursor, data, headers, method, use_proxy, force, last_mod_time, timeout, validate_certs, url_username, url_password, http_agent, force_basic_auth, ignore_errors, client_cert) if rc != 200: return rc, None resp['results'].extend(page.get('results', [])) cursor = page.get('cursor') return rc, resp def _request(url, data, headers, method, use_proxy, force, last_mod_time, timeout, validate_certs, url_username, url_password, http_agent, force_basic_auth, ignore_errors, client_cert): ca_path = get_certificate_file_path('NSX_MANAGER_CA_PATH') resp_data = None try: r = open_url( url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=url_username, url_password=url_password, http_agent=http_agent, client_cert=client_cert, force_basic_auth=force_basic_auth, ca_path=ca_path) except HTTPError as err: r = err try: raw_data = r.read().decode('utf-8') if raw_data: if is_json(raw_data): resp_data = json.loads(raw_data) else: resp_data = raw_data except Exception: if not ignore_errors: raise resp_code = r.getcode() if resp_code >= 400 and not ignore_errors: raise Exception(resp_code, resp_data) if not (resp_data is None) and resp_data.__contains__('error_code'): raise Exception (resp_data['error_code'], resp_data) return resp_code, resp_data def get_certificate_string(crt_file): ''' param: crt_file is the file containing the public key string result: returns the public key(client certificate) string to be passed to the payload how: String matching ''' f = open(crt_file, 'r') 
file_content = f.read() file_content = file_content.split("\n") certificate_string = "" got_line_start = False for string in file_content: if string == "-----BEGIN CERTIFICATE-----": got_line_start = True certificate_string = certificate_string + string + "\n" elif string == "-----END CERTIFICATE-----": certificate_string = certificate_string + "\n" + string break elif got_line_start: certificate_string = certificate_string + string else: pass f.close() return certificate_string def get_private_key_string(p12_file): ''' param: p12_file is the file containing the private key string result: returns the private key string to be passed to the payload how: String matching ''' f = open(p12_file, 'r') file_content = f.read() file_content = file_content.split("\n") certificate_string = "" got_start_line = False for string in file_content: if re.match("-+BEGIN[ \w]+PRIVATE[ ]+KEY-+", string): got_start_line = True certificate_string = certificate_string + string + "\n" elif re.match("-+END[ \w]+PRIVATE[ ]+KEY-+", string): certificate_string = certificate_string + "\n" + string break elif got_start_line: certificate_string = certificate_string + string else: pass f.close() return certificate_string def get_certificate_file_path(environment_variable): return os.getenv(environment_variable) def get_vc_ip_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name, exit_if_not_found=True): ''' param: display_name: Display name of the vC result: IP of the vC name provided ''' try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error occured while retrieving vCenter IP for %s. 
' 'Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['server'] if exit_if_not_found: module.fail_json(msg='vCenter with display name %s doesn\'t exist.' % display_name) return -1 def is_json(myjson): ''' Param: myjson: String to be checked result: Checks if the string is valid json or not. ''' try: json_object = json.loads(myjson) except ValueError as e: return False return True def version_tuple(v): return tuple(map(int, (v.split("."))))[:3] # Ignore build number def get_nsx_version(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/node/version', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Failed to retrieve NSX version. Error [%s]' % (to_native(err))) return resp def validate_nsx_mp_support(module, manager_url, mgr_username, mgr_password, validate_certs, err_msg=None): version = get_nsx_version(module, manager_url, mgr_username, mgr_password, validate_certs) # MP resources deprecated since v9.0.0 if version_tuple(version["product_version"]) >= version_tuple("9.0.0"): if err_msg is None: err_msg = 'NSX v9.0.0 and above do not support MP resources.' module.fail_json(msg=err_msg) ================================================ FILE: plugins/modules/__init__.py ================================================ ================================================ FILE: plugins/modules/nsxt_certificates.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_certificates short_description: 'Add a New Certificate' description: "Adds a new private-public certificate or a chain of certificates (CAs) and, optionally, a private key that can be applied to one of the user-facing components (appliance management or edge). The certificate and the key should be stored in PEM format. If no private key is provided, the certificate is used as a client certificate in the trust store." version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str display_name: description: 'Identifier to use when displaying entity in logs or GUI' required: true type: str pem_encoded_file: description: 'File containing pem encoded certificate data' required: true type: str private_key_file: description: 'File containing private key data' required: false type: str passphrase: description: 'Password for private key encryption' required: false type: str description: description: 'Description of this resource' required: false type: str id: description: 'Unique identifier of this resource' required: false type: str key_algo: description: 'Key algorithm contained in this certificate' required: false type: str resource_type: description: 'Must be set to the value TrustObjectData' required: false type: str tags: description: Opaque identifier meaninful to API user required: false type: Array of Tag state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true ''' EXAMPLES = ''' - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Add a new certificate nsxt_certificates: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "Certificate_file" pem_encoded_file: "/Path/to/crt/file" passphrase: "paraphrase" state: "present" ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request, get_certificate_string, get_private_key_string from ansible.module_utils._text import to_native def update_params_with_pem_encoding(certificate_params): ''' params: Parameters passed to the certificate result: Updated parameters. Files are replaced with the public and private strings. 
''' certificate_params['pem_encoded'] = get_certificate_string (certificate_params.pop('pem_encoded_file', None)) if certificate_params.get('private_key_file') is not None: certificate_params['private_key'] = get_private_key_string (certificate_params.pop('private_key_file', None)) return certificate_params def get_certificate_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_certificates(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/trust-management/certificates', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing trust management certificates. Error [%s]' % (to_native(err))) return resp def get_certificate_with_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): ''' result: returns the certificate object with the display name provided ''' certificates = get_certificates(module, manager_url, mgr_username, mgr_password, validate_certs) for certificate in certificates['results']: if certificate.__contains__('display_name') and certificate['display_name'] == display_name: return certificate return None def main(): argument_spec = dict() argument_spec.update(hostname=dict(type='str', required=True), username=dict(type='str', required=True), password=dict(type='str', required=True, no_log=True), port=dict(type='int', default=443), validate_certs=dict(type='bool', requried=False, default=True), display_name=dict(required=True, type='str'), pem_encoded_file=dict(required=False, type='str', no_log=True), private_key_file=dict(required=False, type='str', no_log=True), passphrase=dict(required=False, type='str', 
no_log=True), description=dict(required=False, type='str'), id=dict(required=False, type='str'), key_algo=dict(required=False, type='str'), resource_type=dict(required=False, type='str'), tags=dict(required=False, type='list'), state=dict(required=True, choices=['present', 'absent'])) ''' Core function of the module reponsible for adding and deleting the certififcate. ''' module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) certificate_params = get_certificate_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) certificate_with_display_name = get_certificate_with_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name) if state == 'present': # add the certificate if certificate_with_display_name: module.fail_json(msg="Certificate with display name \'%s\' already exists." % display_name) try: certificate_params = update_params_with_pem_encoding(certificate_params) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' request_data = json.dumps(certificate_params) (rc, resp) = request(manager_url+ '/trust-management/certificates?action=import', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add certificate.\n Error: [%s].\n Request_body[%s]." % (to_native(err), request_data)) time.sleep(5) module.exit_json(changed=True, result=resp, message="certificate created. 
Response: [%s]" % str(resp)) elif state == 'absent': #Delete the certificate if not certificate_with_display_name: module.fail_json(msg="Certificate with display name \'%s\' doesn't exists." % display_name) certificate_id = certificate_with_display_name['id'] try: (rc, resp) = request(manager_url+ '/trust-management/certificates/' + certificate_id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to delete certificate with display name \'%s\'. Error[%s]." % (display_name, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=certificate_id, message="Certificate with certificate id: %s deleted." % certificate_id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_certificates_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_certificates_facts short_description: List all existing certificates description: Returns all certificate information viewable by the user, including each certificate's UUID; resource_type (for example, certificate_self_signed, certificate_ca, or certificate_signed); pem_encoded data; and history of the certificate (who created or modified it and when). For additional information, include the ?details=true modifier at the end of the request URI. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str ''' EXAMPLES = ''' - name: List all existing certificates nsxt_certificates_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/trust-management/certificates', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, 
validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing transport zone. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_cluster_profiles.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_cluster_profiles short_description: 'Create a Cluster Profile' description: "Create a cluster profile. The resource_type is required." version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str display_name: description: 'Display name' required: true type: str description: description: Description of the resource required: false type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true ''' EXAMPLES = ''' - name: Create Cluster Profiles nsxt_cluster_profiles: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ` resource_type: EdgeHighAvailabilityProfile display_name: edge-cluster-profile-East bfd_probe_interval: 1000 bfd_declare_dead_multiple: 3 bfd_allowed_hops: 1 state: present ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def get_cluster_profiles_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_cluster_profiles(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/cluster-profiles', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing edge clusters. 
Error [%s]' % (to_native(err))) return resp def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] module.fail_json(msg='No id exist with display name %s' % display_name) def cmp_dict(dict1, dict2): for k2, v2 in dict2.items(): found = False if k2 not in dict1: continue if type(v2) != list and dict1[k2] != dict2[k2]: return False for obj2 in v2: for obj1 in dict1[k2]: if all(item in obj1.items() for item in obj2.items()): found = True if not found: return False return True def get_cluster_profiles_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): cluster_profiles = get_cluster_profiles(module, manager_url, mgr_username, mgr_password, validate_certs) for cluster_profile in cluster_profiles['results']: if cluster_profile.__contains__('display_name') and cluster_profile['display_name'] == display_name: return cluster_profile return None # def ordered(obj): # if isinstance(obj, dict): # return sorted((k, ordered(v)) for k, v in obj.items()) # if isinstance(obj, list): # return sorted(ordered(x) for x in obj) # else: # return obj def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, cluster_profiles_body): existing_edge_cluster = get_cluster_profiles_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, cluster_profiles_body['display_name']) if existing_edge_cluster is None: return False if existing_edge_cluster.__contains__('description') and not 
cluster_profiles_body.__contains__('description'): return True if not existing_edge_cluster.__contains__('description') and cluster_profiles_body.__contains__('description'): return True if existing_edge_cluster.__contains__('description') and cluster_profiles_body.__contains__('description') and \ existing_edge_cluster['description'] != cluster_profiles_body['description']: return True if existing_edge_cluster.__contains__('bfd_allowed_hops') and not cluster_profiles_body.__contains__('bfd_allowed_hops'): return True if not existing_edge_cluster.__contains__('bfd_allowed_hops') and cluster_profiles_body.__contains__('bfd_allowed_hops'): return True if existing_edge_cluster.__contains__('bfd_allowed_hops') and cluster_profiles_body.__contains__('bfd_allowed_hops') and \ existing_edge_cluster['bfd_allowed_hops'] != cluster_profiles_body['bfd_allowed_hops']: return True if existing_edge_cluster.__contains__('bfd_declare_dead_multiple') and not cluster_profiles_body.__contains__('bfd_declare_dead_multiple'): return True if not existing_edge_cluster.__contains__('bfd_declare_dead_multiple') and cluster_profiles_body.__contains__('bfd_declare_dead_multiple'): return True if existing_edge_cluster.__contains__('bfd_declare_dead_multiple') and cluster_profiles_body.__contains__('bfd_declare_dead_multiple') and \ existing_edge_cluster['bfd_declare_dead_multiple'] != cluster_profiles_body['bfd_declare_dead_multiple']: return True if existing_edge_cluster.__contains__('standby_relocation_config') and not cluster_profiles_body.__contains__('standby_relocation_config'): return True if not existing_edge_cluster.__contains__('standby_relocation_config') and cluster_profiles_body.__contains__('standby_relocation_config'): return True if existing_edge_cluster.__contains__('standby_relocation_config') and cluster_profiles_body.__contains__('standby_relocation_config') and \ not cmp_dict(existing_edge_cluster['standby_relocation_config'], 
cluster_profiles_body['standby_relocation_config']): return True return False def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, cluster_profile_params): return cluster_profile_params def get_profile_id_from_profile_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): cluster_profiles = get_cluster_profiles(module, manager_url, mgr_username, mgr_password, validate_certs) for cluster_profile in cluster_profiles['results']: if cluster_profile.__contains__('display_name') and cluster_profile['display_name'] == display_name: return cluster_profile['id'] module.fail_json(msg='No id exist with display name %s' % display_name) def update_params_with_profile_id(module, manager_url, mgr_username, mgr_password, validate_certs, edge_cluster_params): if edge_cluster_params.__contains__('cluster_profile_bindings'): for cluster_profile in edge_cluster_params['cluster_profile_bindings']: cluster_profile_name = cluster_profile.pop('profile_name', None) cluster_profile['profile_id'] = get_profile_id_from_profile_name(module, manager_url, mgr_username, mgr_password, validate_certs, cluster_profile_name) return edge_cluster_params def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), description=dict(required=False, type='str'), resource_type=dict(required=True, type='str'), bfd_allowed_hops=dict(required=False, type='int'), bfd_declare_dead_multiple=dict(required=False, type='int'), bfd_probe_interval=dict(required=False, type='int'), standby_relocation_config=dict(required=False, type=dict, standby_relocation_threshold=dict(required=False, type='int')), tags=dict(required=False, type='list'), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) cluster_profile_params = get_cluster_profiles_params(module.params.copy()) state = module.params['state'] mgr_hostname = 
module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) cluster_profiles_dict = get_cluster_profiles_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) cluster_profile_id, revision = None, None if cluster_profiles_dict: cluster_profile_id = cluster_profiles_dict['id'] revision = cluster_profiles_dict['_revision'] if state == 'present': body = update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs, cluster_profile_params) updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, body) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' if not updated: # add the edge_cluster request_data = json.dumps(body) if module.check_mode: module.exit_json(changed=True, debug_out=str(request_data), id='12345') try: if cluster_profile_id: module.exit_json(changed=False, id=cluster_profile_id, message="Cluster profile with display_name %s already exist."% module.params['display_name']) (rc, resp) = request(manager_url+ '/cluster-profiles', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add cluster profile. Request body [%s]. Error[%s]." % (request_data, to_native(err))) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="Cluster profile with display name %s created." 
% module.params['display_name']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(edge_cluster_params)), id=cluster_profile_id) body['_revision'] = revision # update current revision request_data = json.dumps(body) id = cluster_profile_id try: (rc, resp) = request(manager_url+ '/cluster-profiles/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update cluster profile with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err))) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="Cluster profile with id %s updated." % id) elif state == 'absent': # delete the edge cluster id = cluster_profile_id if id is None: module.exit_json(changed=False, msg='No cluster profile exist with display name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(edge_cluster_params)), id=id) try: (rc, resp) = request(manager_url + "/cluster-profiles/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete cluster profile with id %s. Error[%s]." % (id, to_native(err))) module.exit_json(changed=True, id=id, message="Cluster profile with id %s deleted." % id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_cluster_profiles_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_cluster_profiles_facts short_description: List Cluster Profiles description: Returns paginated list of cluster profiles Cluster profiles define policies for edge cluster and bridge cluster. version_added: "2.7" author: Kommireddy Akhilesh options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str ''' EXAMPLES = ''' - name: List Cluster Profiles nsxt_cluster_profiles_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/cluster-profiles', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing list of edge cluster. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_deploy_ova.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_deploy_ova short_description: Deploys NSXT Manager description: Deploys NSXT Manager version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str ovftool_path: description: Path of ovf tool type: 'str' folder: description: vCenter folder required: false type: 'str' datacenter: description: Datacenter name required: true type: 'str' datastore: description: Data Store required: true type: 'str' portgroup: description: Port group required: true type: 'str' portgroup_ext: description: External Portgroup type: 'str' portgroup_transport: description: Transport Port Group type: 'str' cluster: description: vCenter Cluster required: true type: 'str' vmname: description: Name of VM required: true type: 'str' hostname: description: Name of host required: true type: 'str' dns_server: description: DNS server address required: true type: 'str' ntp_server: description: NTP Server Address required: true type: 'str' dns_domain: description: DNS Domain name required: true type: 'str' gateway: description: Gateway Address required: true type: 'str' gateway6_0: description: Gateway6 Address required: false type: 'str' ip_address: description: IP Address required: true type: 'str' ip_address6_0: description: IPv6 Address required: false type: 'str' netmask: description: Netmask required: true type: 'str' netmask6_0: description: Netmask6 required: true type: 'str' admin_password: description: Admin Password required: true type: 'str' no_log: true cli_password: description: CLI Password required: true type: 'str' no_log: true ssh_enabled: description: If ssh is enabled default: false allow_ssh_root_login: description: If SSH root login is allowed default: false deployment_size: description: Size of the deployment default: 'medium' type: 'str' path_to_ova: description: Path to OVA file required: true type: 'str' ova_file: description: OVA File name required: true type: 'str' disk_mode: description: Disk mode to used. Thin or thick. 
        default: 'thin'
    vcenter:
        description: vCenter name
        required: true
        type: 'str'
    vcenter_user:
        description: vCenter username
        required: true
        type: 'str'
    vcenter_passwd:
        description: vCenter password
        required: true
        type: 'str'
        no_log: true
    extra_para:
        description: Extra Parameters
        required: false
        type: 'str'
    role:
        description: Roles
        required: true
        type: 'str'
    ip_protocol:
        description: IP Protocol
        required: false
        type: 'str'
requirements:
    - PyVmOmi - Python library for vCenter api.
    - OVF Tools - Ovftool is used for ovf deployment.
'''

EXAMPLES = '''
- name: Deploy NSX Manager OVA
  deploy_ova:
    ovftool_path: "{{ ovfToolPath }}"
    datacenter: "private_dc"
    datastore: "data store"
    portgroup: "VM Network"
    cluster: "nsxt_cluster"
    vmname: "nsxt-manager"
    hostname: "nsxt-manager-10"
    dns_server: "10.161.244.213"
    dns_domain: "eng.vmware.com"
    ntp_server: "123.108.200.124"
    gateway: "10.112.203.253"
    gateway6_0: "2620:124:6020:1045::253"
    ip_address: "10.112.201.24"
    ip_address6_0: "2620:124:6020:1045::1a"
    netmask: "255.255.224.0"
    netmask6_0: "64"
    admin_password: "Admin!23Admin"
    cli_password: "Admin!23Admin"
    path_to_ova: "http://build-squid.eng.vmware.com/build/mts/release/bora-8411846/publish/nsx-unified-appliance/exports/ovf"
    ova_file: "nsx-unified-appliance-2.2.0.0.0.8411854.ovf"
    vcenter: "10.161.244.213"
    vcenter_user: "administrator@vsphere.local"
    vcenter_passwd: "Admin!23"
    deployment_size: "small"
    role: "nsx-manager nsx-controller"
    ip_protocol: IPv6
'''

RETURN = '''# '''

import requests
import ssl
from pyVim.connect import SmartConnect
from pyVmomi import vim, vmodl


def find_virtual_machine(content, searched_vm_name):
    """Return the vim.VirtualMachine named searched_vm_name, or None."""
    virtual_machines = get_all_objs(content, [vim.VirtualMachine])
    for vm in virtual_machines:
        if vm.name == searched_vm_name:
            return vm
    return None


def get_all_objs(content, vimtype):
    """Collect every managed object of the given vim type(s).

    Returns a {managed_object_ref: name} dict built from a recursive
    container view rooted at the inventory root folder.
    """
    obj = {}
    container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
    for managed_object_ref in container.view:
        obj.update({managed_object_ref: managed_object_ref.name})
    return obj


def connect_to_api(vchost, vc_user, vc_pwd):
    """Connect to vCenter and return the ServiceInstance content.

    First tries the default SSL handshake; on connection/SSL failure it
    retries with an explicit TLSv1.2 context using the system CA store.
    """
    try:
        service_instance = SmartConnect(host=vchost, user=vc_user, pwd=vc_pwd)
    except (requests.ConnectionError, ssl.SSLError):
        try:
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            context.load_default_certs()
            service_instance = SmartConnect(host=vchost, user=vc_user, pwd=vc_pwd, sslContext=context)
        except Exception as e:
            raise Exception(e)
    return service_instance.RetrieveContent()


def main():
    """Deploy the NSX appliance OVA via ovftool.

    Idempotent by VM name: if a VM with the requested vmname already exists
    in vCenter, exits unchanged. Otherwise builds an ovftool command line
    (networks, deployment size, OVF properties, target vi:// locator) and
    runs it with module.run_command.
    """
    module = AnsibleModule(
        argument_spec=dict(
            ovftool_path=dict(type='str'),
            folder=dict(required=False, type='str'),
            datacenter=dict(required=True, type='str'),
            datastore=dict(required=True, type='str'),
            portgroup=dict(required=True, type='str'),
            portgroup_ext=dict(type='str'),
            portgroup_transport=dict(type='str'),
            cluster=dict(required=True, type='str'),
            vmname=dict(required=True, type='str'),
            hostname=dict(required=True, type='str'),
            dns_server=dict(required=True, type='str'),
            ntp_server=dict(required=True, type='str'),
            dns_domain=dict(required=True, type='str'),
            gateway=dict(required=True, type='str'),
            gateway6_0=dict(type='str'),
            ip_address=dict(required=True, type='str'),
            ip_address6_0=dict(type='str'),
            netmask=dict(required=True, type='str'),
            netmask6_0=dict(type='str'),
            admin_password=dict(required=True, type='str', no_log=True),
            cli_password=dict(required=True, type='str', no_log=True),
            ssh_enabled=dict(default=False),
            allow_ssh_root_login=dict(default=False),
            deployment_size=dict(default='medium', type='str'),
            path_to_ova=dict(required=True, type='str'),
            ova_file=dict(required=True, type='str'),
            disk_mode=dict(default='thin'),
            vcenter=dict(required=True, type='str'),
            vcenter_user=dict(required=True, type='str'),
            vcenter_passwd=dict(required=True, type='str', no_log=True),
            extra_para=dict(type='str'),
            role=dict(required=True, type='str'),
            ip_protocol=dict(required=False, type='str')
        ),
        supports_check_mode=True,
        # IPv6 settings and the multi-portgroup layout are only meaningful
        # as complete sets, hence required_together.
        required_together=[['gateway6_0', 'ip_address6_0', 'netmask6_0'],
                           ['portgroup_ext', 'portgroup_transport']]
    )
    try:
        content = connect_to_api(module.params['vcenter'],
                                 module.params['vcenter_user'],
                                 module.params['vcenter_passwd'])
    except vim.fault.InvalidLogin:
        module.fail_json(msg='exception while connecting to vCenter, login failure, check username and password')
    except requests.exceptions.ConnectionError:
        module.fail_json(msg='exception while connecting to vCenter, check hostname, FQDN or IP')
    # Idempotency check: nothing to do when the VM already exists.
    nsx_manager_vm = find_virtual_machine(content, module.params['vmname'])
    if nsx_manager_vm:
        module.exit_json(changed=False, msg='A VM with the name {} was already present'.format(module.params['vmname']))
    ovftool_exec = '{}/ovftool'.format(module.params['ovftool_path'])
    ovf_command = [ovftool_exec]
    ovf_base_options = ['--acceptAllEulas', '--skipManifestCheck', '--X:injectOvfEnv',
                        '--powerOn', '--noSSLVerify', '--allowExtraConfig',
                        '--diskMode={}'.format(module.params['disk_mode']),
                        '--datastore={}'.format(module.params['datastore']),
                        '--name={}'.format(module.params['vmname'])
                        ]
    if module.params['ip_protocol']:
        ovf_base_options.extend(['--ipProtocol={}'.format(module.params['ip_protocol'])])
    if module.params['portgroup_ext']:
        # Multi-NIC layout: management (0 and 3), external (1) and
        # transport (2) networks.
        ovf_base_options.extend(['--net:Network 0={}'.format(module.params['portgroup']),
                                 '--net:Network 1={}'.format(module.params['portgroup_ext']),
                                 '--net:Network 2={}'.format(module.params['portgroup_transport']),
                                 '--net:Network 3={}'.format(module.params['portgroup'])])
    else:
        ovf_base_options.extend(['--network={}'.format(module.params['portgroup'])])
    ovf_command.extend(ovf_base_options)
    ovf_deployement_size = ['--deploymentOption={}'.format(module.params['deployment_size'])]
    ovf_command.extend(ovf_deployement_size)
    # OVF properties consumed by the NSX appliance on first boot.
    ovf_ext_prop = ['--prop:nsx_hostname={}'.format(module.params['hostname']),
                    '--prop:nsx_dns1_0={}'.format(module.params['dns_server']),
                    '--prop:nsx_domain_0={}'.format(module.params['dns_domain']),
                    '--prop:nsx_ntp_0={}'.format(module.params['ntp_server']),
                    '--prop:nsx_gateway_0={}'.format(module.params['gateway']),
                    '--prop:nsx_ip_0={}'.format(module.params['ip_address']),
                    '--prop:nsx_netmask_0={}'.format(module.params['netmask']),
                    '--prop:nsx_passwd_0={}'.format(module.params['admin_password']),
                    '--prop:nsx_cli_passwd_0={}'.format(module.params['cli_password']),
                    '--prop:nsx_isSSHEnabled={}'.format(module.params['ssh_enabled']),
                    '--prop:nsx_allowSSHRootLogin={}'.format(module.params['allow_ssh_root_login']),
                    '--prop:nsx_role={}'.format(module.params['role'])]
    ovf_command.extend(ovf_ext_prop)
    if module.params['extra_para']:
        ovf_command.extend(['--prop:extraPara={}'.format(module.params['extra_para'])])
    if module.params['gateway6_0']:
        ovf_command.extend(['--prop:nsx_gateway6_0={}'.format(module.params['gateway6_0'])])
    if module.params['ip_address6_0']:
        ovf_command.extend(['--prop:nsx_ip6_0={}'.format(module.params['ip_address6_0'])])
    if module.params['netmask6_0']:
        ovf_command.extend(['--prop:nsx_netmask6_0={}'.format(module.params['netmask6_0'])])
    ova_file = '{}/{}'.format(module.params['path_to_ova'], module.params['ova_file'])
    ovf_command.append(ova_file)
    # NOTE(review): vcenter_passwd is embedded in the vi:// locator and so
    # appears in the failure message's command echo below — confirm this
    # exposure is acceptable.
    vi_string = 'vi://{}:{}@{}/'.format(module.params['vcenter_user'],
                                        module.params['vcenter_passwd'], module.params['vcenter'])
    if module.params.__contains__('folder') and module.params['folder']:
        vi_string = vi_string + module.params['folder']
    vi_string = vi_string + '/{}/host/{}/'.format(module.params['datacenter'], module.params['cluster'])
    ovf_command.append(vi_string)
    if module.check_mode:
        # Check mode: report the command that would have been run.
        module.exit_json(changed=True, debug_out=ovf_command)
    ova_tool_result = module.run_command(ovf_command)
    if ova_tool_result[0] != 0:
        module.fail_json(msg='Failed to deploy OVA, error message from ovftool is: {}, the comand was {}'.format(ova_tool_result[1], ovf_command))
    module.exit_json(changed=True, ova_tool_result=ova_tool_result)

from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()


================================================
FILE:
plugins/modules/nsxt_edge_clusters.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_edge_clusters short_description: 'Create Edge Cluster' description: "Creates a new edge cluster. It only supports homogeneous members. The TransportNodes backed by EdgeNode are only allowed in cluster members. DeploymentType (VIRTUAL_MACHINE|PHYSICAL_MACHINE) of these EdgeNodes is recommended to be the same. EdgeCluster supports members of different deployment types." version_added: '2.7' author: 'Rahul Raghuvanshi' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str cluster_profile_bindings: description: 'Edge cluster profile bindings' required: false type: 'array of ClusterProfileTypeIdEntry' display_name: description: 'Display name' required: true type: str description: description: Description of the resource required: false type: str members: description: "EdgeCluster only supports homogeneous members. These member should be backed by either EdgeNode or PublicCloudGatewayNode. TransportNode type of these nodes should be the same. DeploymentType (VIRTUAL_MACHINE|PHYSICAL_MACHINE) of these EdgeNodes is recommended to be the same. EdgeCluster supports members of different deployment types." required: false type: 'array of EdgeClusterMember' allocation_rules: description: Allocation rules for auto placement required: false type: list enable_inter_site_forwarding: description: Flag to enable inter site forwarding required: false type: bool node_rtep_ips: description: Remote tunnel endpoint ip address required: false type: list member_node_type: description: Node type of the cluster members required: false type: dict EdgeDeploymentType: description: Supported edge deployment type. required: false type: str deployment_type: description: Deplloyment type of the cluster members required: false type: dict EdgeDeploymentType: description: Supported edge deployment type. required: false type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." 
required: true ''' EXAMPLES = ''' - name: Create Edge Cluster nsxt_edge_clusters: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False display_name: edge-cluster-1 ` cluster_profile_bindings: - profile_name: "nsx-edge-profile" resource_type: EdgeHighAvailabilityProfile members: - transport_node_name: "TN_1" state: present ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def get_edge_cluster_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_edge_clusters(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/edge-clusters', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing edge clusters. Error [%s]' % (to_native(err))) return resp def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. 
Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] module.fail_json(msg='No id exist with display name %s' % display_name) def get_edge_clusters_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): edge_clusters = get_edge_clusters(module, manager_url, mgr_username, mgr_password, validate_certs) for edge_cluster in edge_clusters['results']: if edge_cluster.__contains__('display_name') and edge_cluster['display_name'] == display_name: return edge_cluster return None # def ordered(obj): # if isinstance(obj, dict): # return sorted((k, ordered(v)) for k, v in obj.items()) # if isinstance(obj, list): # return sorted(ordered(x) for x in obj) # else: # return obj def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, edge_cluster_with_id): existing_edge_cluster = get_edge_clusters_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, edge_cluster_with_id['display_name']) if existing_edge_cluster is None: return False if existing_edge_cluster.__contains__('description') and not edge_cluster_with_id.__contains__('description'): return True if not existing_edge_cluster.__contains__('description') and edge_cluster_with_id.__contains__('description'): return True if existing_edge_cluster.__contains__('description') and edge_cluster_with_id.__contains__('description') and \ existing_edge_cluster['description'] != edge_cluster_with_id['description']: return True if existing_edge_cluster.__contains__('members') and not edge_cluster_with_id.__contains__('members'): return True if not existing_edge_cluster.__contains__('members') and edge_cluster_with_id.__contains__('members'): return True if existing_edge_cluster.__contains__('members') and edge_cluster_with_id.__contains__('members'): if len(existing_edge_cluster['members']) != 
len(edge_cluster_with_id['members']): return True for count, member in enumerate(existing_edge_cluster['members']): if member['transport_node_id'] != edge_cluster_with_id['members'][count]['transport_node_id']: return True return False def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, edge_cluster_params): if edge_cluster_params.__contains__('members'): for transport_node in edge_cluster_params['members']: transport_node_name = transport_node.pop('transport_node_name', None) transport_node['transport_node_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/transport-nodes", transport_node_name) return edge_cluster_params def get_cluster_profiles(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/cluster-profiles', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing edge clusters. 
Error [%s]' % (to_native(err))) return resp def get_profile_id_from_profile_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): cluster_profiles = get_cluster_profiles(module, manager_url, mgr_username, mgr_password, validate_certs) for cluster_profile in cluster_profiles['results']: if cluster_profile.__contains__('display_name') and cluster_profile['display_name'] == display_name: return cluster_profile['id'] module.fail_json(msg='No id exist with display name %s' % display_name) def update_params_with_profile_id(module, manager_url, mgr_username, mgr_password, validate_certs, edge_cluster_params): if edge_cluster_params.__contains__('cluster_profile_bindings'): for cluster_profile in edge_cluster_params['cluster_profile_bindings']: cluster_profile_name = cluster_profile.pop('profile_name', None) cluster_profile['profile_id'] = get_profile_id_from_profile_name(module, manager_url, mgr_username, mgr_password, validate_certs, cluster_profile_name) return edge_cluster_params def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), description=dict(required=False, type='str'), cluster_profile_bindings=dict(required=False, type='list'), members=dict(required=False, type='list'), # tranpost_node_name allocation_rules=dict(required=False, type='list'), deployment_type=dict(required=False, type='dict', EdgeDeploymentType=dict(required=False, type='str')), enable_inter_site_forwarding=dict(required=False, type='bool'), member_node_type=dict(required=False, type='dict', EdgeClusterNodeType=dict(required=False, type='str')), node_rtep_ips=dict(required=False, type='str'), tags=dict(required=False, type='list'), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) edge_cluster_params = get_edge_cluster_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] 
mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) edge_cluster_dict = get_edge_clusters_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) edge_cluster_id, revision = None, None if edge_cluster_dict: edge_cluster_id = edge_cluster_dict['id'] revision = edge_cluster_dict['_revision'] if state == 'present': body = update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs, edge_cluster_params) body = update_params_with_profile_id(module, manager_url, mgr_username, mgr_password, validate_certs, edge_cluster_params) updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, body) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' if not updated: # add the edge_cluster request_data = json.dumps(body) if module.check_mode: module.exit_json(changed=True, debug_out=str(request_data), id='12345') try: if edge_cluster_id: module.exit_json(changed=False, id=edge_cluster_id, message="Edge cluster with display_name %s already exist."% module.params['display_name']) (rc, resp) = request(manager_url+ '/edge-clusters', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add edge cluster. Request body [%s]. Error[%s]." % (request_data, to_native(err))) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="edge cluster with display name %s created." 
% module.params['display_name']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(edge_cluster_params)), id=edge_cluster_id) body['_revision'] = revision # update current revision request_data = json.dumps(body) id = edge_cluster_id try: (rc, resp) = request(manager_url+ '/edge-clusters/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update edge cluster with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err))) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="Edge cluster with edge cluster id %s updated." % id) elif state == 'absent': # delete the edge cluster id = edge_cluster_id if id is None: module.exit_json(changed=False, msg='No edge cluster exist with display name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(edge_cluster_params)), id=id) try: (rc, resp) = request(manager_url + "/edge-clusters/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete edge cluster with id %s. Error[%s]." % (id, to_native(err))) module.exit_json(changed=True, id=id, message="edge cluster with edge cluster id %s deleted." % id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_edge_clusters_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_edge_clusters_facts short_description: List Edge Clusters description: Returns information about the configured edge clusters, which enable you to group together transport nodes of the type EdgeNode and apply fabric profiles to all members of the edge cluster. Each edge node can participate in only one edge cluster. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str ''' EXAMPLES = ''' - name: List Edge Clusters nsxt_edge_clusters_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/edge-clusters', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing list of edge cluster. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_fabric_compute_managers.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_fabric_compute_managers short_description: 'Register compute manager with NSX' description: "Registers compute manager with NSX. Inventory service will collect data from the registered compute manager" version_added: '2.7' author: 'Rahul Raghuvanshi' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str credential: asymmetric_credential: description: 'Asymmetric login credential' required: false type: str credential_key: description: 'Credential key' no_log: 'True' required: false type: str credential_type: description: 'Possible values are UsernamePasswordLoginCredential, VerifiableAsymmetricLoginCredential.' 
required: true type: str credential_verifier: description: 'Credential verifier' required: false type: str description: 'Login credentials for the compute manager' password: description: "Password for the user (optionally specified on PUT, unspecified on GET)" no_log: 'True' required: false type: str required: false thumbprint: description: 'Hexadecimal SHA256 hash of the vIDM server''s X.509 certificate' no_log: 'True' required: false type: str type: dict username: description: 'Username value of the log' required: false type: str display_name: description: 'Display name' required: true type: str origin_type: description: 'Compute manager type like vCenter' required: true type: str description: description: 'Description of the resource' required: false type: str server: description: 'IP address or hostname of compute manager' required: true type: str set_as_oidc_provider: description: "Specifies whether compute manager has been set as OIDC provider If the compute manager is VC and need to set set as OIDC provider for NSX then this flag should be set as true. This is specific to TKGS. NSX-T 3.0 only" required: false type: bool state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." 
required: true ''' EXAMPLES = ''' - name: Register compute manager with NSX nsxt_fabric_compute_managers: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False display_name: "vCenter" server: "10.161.244.213" description: "Description of the resource" origin_type: vCenter credential: credential_type: "UsernamePasswordLoginCredential" username: "administrator@vsphere.local" password: "Admin!23" thumbprint: "36:43:34:D9:C2:06:27:4B:EE:C3:4A:AE:23:BF:76:A0:0C:4D:D6:8A:D3:16:55:97:62:07:C2:84:0C:D8:BA:66" state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native import ssl import socket import hashlib def get_fabric_compute_manager_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_thumb(module): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(10) wrappedSocket = ssl.wrap_socket(sock) try: wrappedSocket.connect((module.params['server'], 443)) except: module.fail_json(msg='Connection error while fatching thumbprint for server [%s].' % module.params['server']) else: der_cert_bin = wrappedSocket.getpeercert(True) pem_cert = ssl.DER_cert_to_PEM_cert(wrappedSocket.getpeercert(True)) print(pem_cert) #Thumbprint thumb_sha256 = hashlib.sha256(der_cert_bin).hexdigest() wrappedSocket.close() # The API call expects the Thumbprint in Uppercase. 
While the API call is fixed, # below is a quick fix thumbprint = "" thumbprint = ':'.join(a+b for a,b in zip(thumb_sha256[::2], thumb_sha256[1::2])) return thumbprint.upper() def get_fabric_compute_managers(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/fabric/compute-managers', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing fabric compute manager. Error [%s]' % (to_native(err))) return resp def get_compute_manager_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): compute_managers = get_fabric_compute_managers(module, manager_url, mgr_username, mgr_password, validate_certs) for compute_manager in compute_managers['results']: if compute_manager.__contains__('display_name') and compute_manager['display_name'] == display_name: return compute_manager return None def wait_till_create(id, module, manager_url, mgr_username, mgr_password, validate_certs): try: down_counter = 0 while True: (rc, resp) = request(manager_url+ '/fabric/compute-managers/%s/status'% id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if resp['registration_status'] == "REGISTERING" or resp['registration_status'] == "UNREGISTERED": time.sleep(10) elif resp['registration_status'] == "REGISTERED": if resp["connection_status"] == "CONNECTING": time.sleep(10) elif resp["connection_status"] == "DOWN" and down_counter < 3: time.sleep(10) down_counter = down_counter + 1 elif resp["connection_status"] == "UP": time.sleep(5) return else: module.fail_json(msg= 'Error connecting to compute manager. 
Connection status : %s'%(str(resp["connection_status"]))) else: module.fail_json(msg= 'Error in compute manager status: %s'%(str(resp['registration_status']))) except Exception as err: module.fail_json(msg='Error accessing compute manager status. Error [%s]' % (to_native(err))) def wait_till_delete(id, module, manager_url, mgr_username, mgr_password, validate_certs): try: while True: (rc, resp) = request(manager_url+ '/fabric/compute-managers/%s/status'% id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) time.sleep(10) except Exception as err: time.sleep(5) return def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, compute_manager_with_ids): existing_compute_manager = get_compute_manager_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, compute_manager_with_ids['display_name']) if existing_compute_manager is None: return False if not existing_compute_manager.__contains__('description') and compute_manager_with_ids.__contains__('description'): return True if existing_compute_manager.__contains__('description') and compute_manager_with_ids.__contains__('description') and \ existing_compute_manager['description'] != compute_manager_with_ids['description']: return True if existing_compute_manager['server'] != compute_manager_with_ids['server'] or \ existing_compute_manager['credential']['thumbprint'] != compute_manager_with_ids['credential']['thumbprint'] or \ existing_compute_manager['origin_type'] != compute_manager_with_ids['origin_type']: return True if existing_compute_manager.__contains__('set_as_oidc_provider') and compute_manager_with_ids.__contains__('set_as_oidc_provider') and \ existing_compute_manager['set_as_oidc_provider'] != compute_manager_with_ids['set_as_oidc_provider']: return True return False def main(): argument_spec = vmware_argument_spec() 
argument_spec.update(display_name=dict(required=True, type='str'), credential=dict(required=False, type='dict', no_log=True, username=dict(required=False, type='str'), password=dict(required=False, type='str'), thumbprint=dict(required=False, type='str'), asymmetric_credential=dict(required=False, type='str'), credential_verifier=dict(required=False, type='str'), credential_key=dict(required=False, type='str', no_log=True), credential_type=dict(required=True, type='str')), origin_type=dict(required=True, type='str'), description=dict(required=False, type='str'), server=dict(required=True, type='str'), set_as_oidc_provider=dict(required=False, type='bool'), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) fabric_compute_manager_params = get_fabric_compute_manager_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) if not fabric_compute_manager_params['credential'].__contains__('thumbprint'): fabric_compute_manager_params['credential']['thumbprint'] = get_thumb(module) compute_manager_dict = get_compute_manager_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) compute_manager_id, revision = None, None if compute_manager_dict: compute_manager_id = compute_manager_dict['id'] revision = compute_manager_dict['_revision'] if state == 'present': headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, fabric_compute_manager_params) if not updated: # add the compute_manager request_data = json.dumps(fabric_compute_manager_params) if 
module.check_mode: module.exit_json(changed=True, debug_out=str(request_data), id='12345') try: if compute_manager_id: module.exit_json(changed=False, id=compute_manager_id, message="Compute manager with display_name %s already exist."% module.params['display_name']) (rc, resp) = request(manager_url+ '/fabric/compute-managers', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add compute_manager. Request body [%s]. Error[%s]." % (request_data, to_native(err))) wait_till_create(resp['id'], module, manager_url, mgr_username, mgr_password, validate_certs) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="fabric compute manager with ip %s created." % module.params['server']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(fabric_compute_manager_params)), id=compute_manager_id) fabric_compute_manager_params['_revision'] = revision # update current revision request_data = json.dumps(fabric_compute_manager_params) id = compute_manager_id try: (rc, resp) = request(manager_url+ '/fabric/compute-managers/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update compute_manager with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err))) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="fabric compute manager with compute manager id %s updated." 
% id) elif state == 'absent': # delete the array id = compute_manager_id if id is None: module.exit_json(changed=False, msg='No compute manager exist with display_name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(fabric_compute_manager_params)), id=id) try: (rc, resp) = request(manager_url + "/fabric/compute-managers/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete fabric compute manager with id %s. Error[%s]." % (id, to_native(err))) wait_till_delete(id, module, manager_url, mgr_username, mgr_password, validate_certs) module.exit_json(changed=True, id=id, message="fabric compute manager with compute manager id %s deleted." % id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_fabric_compute_managers_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_fabric_compute_managers_facts short_description: Return the List of Compute managers description: Returns information about all compute managers. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str ''' EXAMPLES = ''' - name: Lists all compute managers nsxt_fabric_compute_managers_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils.urls import open_url, fetch_url from ansible.module_utils._text import to_native from ansible.module_utils.six.moves.urllib.error import HTTPError def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/fabric/compute-managers', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing fabric compute manager. 
Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_global_manager_active.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2021 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_global_manager_active short_description: 'Make the global manager as Active' description: "Make the global manager as Active. This module has to be called using the details of global manager which is to be made active" version_added: '3.2' author: 'Kaushik Lele' options: hostname: description: 'Fully Qualified Domain Name of the Management Node which is to be made active' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str id: description: 'Unique identifier of this global manager' required: true type: str ''' EXAMPLES = ''' - name: Make the global manager as Active nsxt_global_manager_active: fqdn: "10.192.167.137" username: "admin" password: "Admin!23Admin" id: "GM-1" ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native import ssl import socket import hashlib def get_global_managers(module, url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(url, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing global manager. Error [%s]' % (to_native(err))) return resp def get_global_manager_from_id(module, url, mgr_username, mgr_password, validate_certs, id): global_managers = get_global_managers(module, url, mgr_username, mgr_password, validate_certs) for global_manager in global_managers['results']: if global_manager.__contains__('id') and global_manager['id'] == id: return global_manager return None def wait_till_switchover_complete(module, url, mgr_username, mgr_password, validate_certs): try: retry_count = 0 while True: (rc, resp) = request(url, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if (resp['overall_status'] == "ONGOING" or resp['overall_status'] == "NOT_STARTED") and retry_count < 100: time.sleep(10) elif resp['overall_status'] == "COMPLETE": return else: all_errors = '' if resp['errors'] is not None: for e in resp['errors']: all_errors = all_errors + e module.fail_json(msg='Switchover was not completed due to errors : %s' % all_errors) except Exception as err: 
module.fail_json(msg='Error checking switchover status. Error [%s]' % (to_native(err))) def main(): argument_spec = vmware_argument_spec() argument_spec.update(id=dict(required=True, type='str')) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] id = module.params['id'] manager_url = 'https://{}/global-manager/api/v1'.format(mgr_hostname) global_manager_url = manager_url + '/global-infra/global-managers' switchover_api_url = 'https://{}/api/v1/sites/switchover-status'.format(mgr_hostname) existing_global_manager = get_global_manager_from_id(module, global_manager_url, mgr_username, mgr_password, validate_certs, id) global_manager_id, revision = None, None if existing_global_manager is None: module.fail_json(msg="Global_manager with id [%s] not found." % id) global_manager_id = existing_global_manager['id'] revision = existing_global_manager['_revision'] existing_global_manager["display_name"] if existing_global_manager["mode"] == "ACTIVE": module.exit_json(changed=False, id=global_manager_id, message="Global manager with id %s is already in ACTIVE mode." % existing_global_manager["id"]) else: headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' request_data_dict = existing_global_manager request_data_dict["mode"] = "ACTIVE" request_data_dict.pop("connection_info", None) request_data = json.dumps(request_data_dict) if module.check_mode: module.exit_json(changed=True, debug_out=str(request_data), id=global_manager_id) try: (rc, resp) = request(global_manager_url + '/%s' % global_manager_id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to set global_manager as active. 
Request body [%s]. Error[%s]." % ( request_data, to_native(err))) wait_till_switchover_complete(module, switchover_api_url, mgr_username, mgr_password, validate_certs) module.exit_json(changed=True, id=resp["id"], body=str(resp), message="Global manager with id %s was made active." % module.params[ 'id']) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_global_manager_enable_service.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2021 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_global_manager_enable_service short_description: 'Enables global manager service first time after deployment and makes it active' description: "Enables global manager service first time after deployment and makes it active'" version_added: '3.2' author: 'Kaushik Lele' options: hostname: description: 'Deployed NSX Global manager hostname.' 
required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str display_name: description: 'Identifier to use when displaying entity in logs or GUI. Defaults to ID if not set' required: false type: str id: description: 'Unique identifier of this global manager' required: true type: str ''' EXAMPLES = ''' - name: Enables global manager service first time after deployment and makes it active nsxt_global_manager_enable_service: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False display_name: "GM First" id: "GM-1" ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native import ssl import socket import hashlib def get_global_manager_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_global_managers(module, url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(url, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing global manager. 
Error [%s]' % (to_native(err))) return resp def get_global_manager_from_id(module, url, mgr_username, mgr_password, validate_certs, id): global_managers = get_global_managers(module, url, mgr_username, mgr_password, validate_certs) for global_manager in global_managers['results']: if global_manager.__contains__('id') and global_manager['id'] == id: return global_manager return None def main(): argument_spec = vmware_argument_spec() argument_spec.update(username=dict(required=False, type='str'), password=dict(required=False, type='str'), hostname=dict(required=True, type='str'), display_name=dict(required=False, type='str'), id=dict(required=True, type='str')) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) global_manager_params = get_global_manager_params(module.params.copy()) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] id = module.params['id'] manager_url = 'https://{}/global-manager/api/v1'.format(mgr_hostname) global_manager_api_url = manager_url + '/global-infra/global-managers' existing_global_manager = get_global_manager_from_id(module, global_manager_api_url, mgr_username, mgr_password, validate_certs, id) global_manager_id, revision = None, None if existing_global_manager: if existing_global_manager['mode'] == 'ACTIVE': module.exit_json(changed=False, message="Global manager with id %s already exists in ACTIVE mode." % module.params['id']) else: module.fail_json(msg="Global manager with id %s as a standby mode. 
Use other module to make it active " % module.params['id']) else: headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' global_manager_params["mode"] = "ACTIVE" # add the global_manager request_data = json.dumps(global_manager_params) if module.check_mode: module.exit_json(changed=True, debug_out=str(request_data), id=module.params['id']) try: (rc, resp) = request(global_manager_api_url + '/%s' % module.params['id'], data=request_data, headers=headers, method='PATCH', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json( msg="Failed to activate global manager service. Request body [%s]. Error[%s]." % (request_data, to_native(err))) module.exit_json(changed=True, id=module.params['id'], body=str(resp), message="Global manager with id %s is activated." % module.params['id']) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_global_manager_registration.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2021 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_global_manager_registration short_description: 'Register a standby global manager cluster' description: "Register a standby global manager cluster" version_added: '3.2' author: 'Kaushik Lele' options: hostname: description: 'Deployed NSX Global manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str connection_info: description: 'Connection info (fqdn, password...)' fqdn: description: 'IP address or hostname of global manager(cluster)' required: true type: str password: description: "Password for the user" no_log: 'True' required: false type: str required: false thumbprint: description: 'Thumbprint of global manager in the form of a SHA-256 hash represented in lower case HEX' no_log: 'True' required: false type: str username: description: 'Username to connect to global manager' required: false type: str display_name: description: 'Identifier to use when displaying entity in logs or GUI. 
Defaults to ID if not set' required: false type: str description: description: 'Description of this resource' required: false type: str fail_if_rtt_exceeded: description: 'Fail onboarding if maximum RTT exceeded.' required: false type: bool id: description: 'Unique identifier of this resource' required: true type: str maximum_rtt: description: "Maximum acceptable packet round trip time (RTT). If provided and fail_if_rtt_exceeded is true, onboarding of the site will fail if measured RTT is greater than this value. Minimum: 0 Maximum: 1000 Default: 250" required: false type: int mode: choices: - ACTIVE - STANDBY description: "ACTIVE or STANDBY" required: true type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true ''' EXAMPLES = ''' - name: Register a standby global manager cluster with exisitng global manager nsxt_global_manager_registration: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False display_name: "GM Second" id: "GM-1" mode: "STANDBY" connection_info: fqdn: "10.10.10.20" username: "admin" password: "Admin!23" thumbprint: "1a4eeaef05ad711c84d688cfb72001d17a4965a963611d9af63fb86ff55276cf" state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native import ssl import socket import hashlib def get_global_manager_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) # connection_info is an array args["connection_info"] = [args["connection_info"]] return args def get_global_managers(module, 
url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(url, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing global manager. Error [%s]' % (to_native(err))) return resp def get_global_manager_from_id(module, url, mgr_username, mgr_password, validate_certs, id): global_managers = get_global_managers(module, url, mgr_username, mgr_password, validate_certs) for global_manager in global_managers['results']: if global_manager.__contains__('id') and global_manager['id'] == id: return global_manager return None def check_for_update(module, url, mgr_username, mgr_password, validate_certs, global_manager_with_ids): existing_global_manager = get_global_manager_from_id(module, url, mgr_username, mgr_password, validate_certs, global_manager_with_ids['id']) if existing_global_manager is None: return False return True def main(): argument_spec = vmware_argument_spec() argument_spec.update(connection_info=dict(required=True, type='dict', no_log=True, username=dict(required=False, type='str'), password=dict(required=False, type='str'), thumbprint=dict(required=False, type='str'), fqdn=dict(required=True, type='str')), display_name=dict(required=False, type='str'), fail_if_rtt_exceeded=dict(required=False, type='str'), id=dict(required=True, type='str'), maximum_rtt=dict(required=False, type='int'), mode=dict(required=False, choices=['ACTIVE', 'STANDBY']), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) global_manager_params = get_global_manager_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] id = module.params['id'] manager_url = 
'https://{}/global-manager/api/v1'.format(mgr_hostname) global_manager_api_url = manager_url + '/global-infra/global-managers' global_manager_dict = get_global_manager_from_id(module, global_manager_api_url, mgr_username, mgr_password, validate_certs, id) global_manager_id, revision = None, None if global_manager_dict: global_manager_id = global_manager_dict['id'] revision = global_manager_dict['_revision'] if state == 'present': headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' updated = check_for_update(module, global_manager_api_url, mgr_username, mgr_password, validate_certs, global_manager_params) if not updated: # add the global_manager request_data = json.dumps(global_manager_params) if module.check_mode: module.exit_json(changed=True, debug_out=str(request_data), id='12345') try: if global_manager_id: module.exit_json(changed=False, id=global_manager_id, message="Global manager with id %s already exist." % module.params['id'] ) (rc, resp) = request(global_manager_api_url + '/%s' % module.params['id'], data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json( msg="Failed to add global_manager. Request body [%s]. Error[%s]." % (request_data, to_native(err))) module.exit_json(changed=True, id=resp["id"], body=str(resp), message="Global manager with id %s is added." 
% module.params['id']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(global_manager_params)), id=global_manager_id) global_manager_params['_revision'] = revision # update current revision request_data = json.dumps(global_manager_params) id = global_manager_id try: (rc, resp) = request(global_manager_api_url + '/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update global manager with id %s. Request body [%s]. Error[%s]." % ( id, request_data, to_native(err))) module.exit_json(changed=True, id=resp["id"], body=str(resp), message="Global manager with id %s updated." % id) elif state == 'absent': # delete the array if global_manager_id is None: module.exit_json(changed=False, msg='No global manager exists with id %s' % id) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(global_manager_params)), id=id) try: (rc, resp) = request(global_manager_api_url + "/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete global manager with id %s. Error[%s]." % (id, to_native(err))) module.exit_json(changed=True, id=id, message="Global manager with id %s is deleted." % id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_ip_blocks.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_ip_blocks short_description: 'Create a new IP address block.' description: "Creates a new IPv4 address block using the specified cidr. cidr is a required parameter. display_name & description are optional parameters" version_added: '2.7' author: 'Rahul Raghuvanshi' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str cidr: description: "Represents network address and the prefix length which will be associated\nwith a layer-2 broadcast domain" required: true type: str display_name: description: 'Display name' required: true type: str description: description: 'Description of the resource' required: false type: str tags: description: 'Opaque identifier meaningful to the API user' required: false type: list state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." 
required: true ''' EXAMPLES = ''' - name: Create a new IP address block nsxt_ip_blocks: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False display_name: "IPBlock-Tenant-1" description: "IPBlock-Tenant-1 Description" cidr: "192.168.0.0/16" state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request, validate_nsx_mp_support from ansible.module_utils._text import to_native def get_ip_block_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_ip_blocks(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/pools/ip-blocks', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing ip blocks. 
Error [%s]' % (to_native(err))) return resp def get_ip_block_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): ip_blocks = get_ip_blocks(module, manager_url, mgr_username, mgr_password, validate_certs) for ip_block in ip_blocks['results']: if ip_block.__contains__('display_name') and ip_block['display_name'] == display_name: return ip_block return None def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, ip_block_params): existing_ip_block = get_ip_block_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, ip_block_params['display_name']) if existing_ip_block is None: return False if existing_ip_block['cidr'] != ip_block_params['cidr']: return True return False def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), cidr=dict(required=True, type='str'), description=dict(required=False, type='str'), tags=dict(required=False, type='list'), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) ip_block_params = get_ip_block_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) err_msg = 'NSX v9.0.0 and above do not support MP resources in nsxt_ip_blocks.py. Please use nsxt_policy_ip_block.py module.' 
validate_nsx_mp_support(module, manager_url, mgr_username, mgr_password, validate_certs, err_msg) block_dict = get_ip_block_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) block_id, revision = None, None if block_dict: block_id = block_dict['id'] revision = block_dict['_revision'] if state == 'present': headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, ip_block_params) if not updated: # add the block if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(ip_block_params)), id='12345') request_data = json.dumps(ip_block_params) try: if block_id: module.exit_json(changed=False, id=block_id, message="IP block with display_name %s already exist."% module.params['display_name']) (rc, resp) = request(manager_url+ '/pools/ip-blocks', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add ip block. Request body [%s]. Error[%s]." % (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="IP block with display name %s created." % module.params['display_name']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(ip_block_params)), id=block_id) ip_block_params['_revision'] = revision # update current revision request_data = json.dumps(ip_block_params) id = block_id try: (rc, resp) = request(manager_url+ '/pools/ip-blocks/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update ip block with id %s. Request body [%s]. Error[%s]." 
% (id, request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="ip block with block id %s updated." % id) elif state == 'absent': # delete the array id = block_id if id is None: module.exit_json(changed=False, msg='No ip block exist with display name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(ip_block_params)), id=id) try: (rc, resp) = request(manager_url + "/pools/ip-blocks/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete ip block with id %s. Error[%s]." % (id, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=id, message="ip block with block id %s deleted." % id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_ip_blocks_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import, division, print_function __metaclass__ = type import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils.urls import open_url, fetch_url from ansible.module_utils._text import to_native from ansible.module_utils.six.moves.urllib.error import HTTPError ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_ip_blocks_facts short_description: Returns list of configured IP address blocks. description: Returns information about configured IP address blocks. Information includes the id, display name, description & CIDR of IP address blocks version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str ''' EXAMPLES = ''' - name: Lists all configured IP address blocks nsxt_ip_block_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' def main(): argument_spec = vmware_argument_spec() #raise ValueError(argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] #raise ValueError(argument_spec) manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/pools/ip-blocks', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing list of ip blocks. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_ip_pools.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_ip_pools short_description: 'Create an IP Pool' description: "Creates a new IPv4 or IPv6 address pool. Required parameters are allocation_ranges and cidr. Optional parameters are display_name, description, dns_nameservers, dns_suffix, and gateway_ip." version_added: '2.7' author: 'Rahul Raghuvanshi' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str display_name: description: 'Display name' required: true type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true subnets: description: "Subnets can be IPv4 or IPv6 and they should not overlap. The maximum number will not exceed 5 subnets." 
required: false type: 'array of IpPoolSubnet' tags: description: 'Opaque identifiers meaningful to the API user' required: false type: str description: description: 'description of the resource' required: false type: str ip_release_delay: description: 'IP address release delay' required: false type: int ''' EXAMPLES = ''' - name: Create ip pool nsxt_ip_pools: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False display_name: IPPool-IPV4-1 subnets: - allocation_ranges: - start: "10.112.201.28" end: "10.112.201.29" cidr: "10.112.201.0/24" state: "present" ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request, validate_nsx_mp_support from ansible.module_utils._text import to_native def get_ip_pool_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_ip_pools(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/pools/ip-pools', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing ip pools. 
Error [%s]' % (to_native(err))) return resp def get_ip_pool_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): ip_pools = get_ip_pools(module, manager_url, mgr_username, mgr_password, validate_certs) for ip_pool in ip_pools['results']: if ip_pool.__contains__('display_name') and ip_pool['display_name'] == display_name: return ip_pool return None # def ordered(obj): # if isinstance(obj, dict): # return sorted((k, ordered(v)) for k, v in obj.items()) # if isinstance(obj, list): # return sorted(ordered(x) for x in obj) # else: # return obj def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, ip_pool_params): existing_ip_pool = get_ip_pool_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, ip_pool_params['display_name']) if existing_ip_pool is None: return False if existing_ip_pool.__contains__('subnets') and ip_pool_params.__contains__('subnets') and existing_ip_pool['subnets'] != ip_pool_params['subnets']: return True return False def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), subnets=dict(required=False, type='list'), tags=dict(required=False, type='list'), description=dict(required=False, type='str'), ip_release_delay=dict(required=False, type='int'), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) ip_pool_params = get_ip_pool_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) err_msg = 'NSX v9.0.0 and above do not support MP resources in nsxt_ip_pools.py. Please use nsxt_policy_ip_pool.py module.' 
validate_nsx_mp_support(module, manager_url, mgr_username, mgr_password, validate_certs, err_msg) pool_dict = get_ip_pool_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) pool_id, revision = None, None if pool_dict: pool_id = pool_dict['id'] revision = pool_dict['_revision'] if state == 'present': headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, ip_pool_params) if not updated: # add the pool if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(ip_pool_params)), id='12345') request_data = json.dumps(ip_pool_params) try: if pool_id: module.exit_json(changed=False, id=pool_id, message="IP pool with display_name %s already exist."% module.params['display_name']) (rc, resp) = request(manager_url+ '/pools/ip-pools', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add ip pool. Request body [%s]. Error[%s]." % (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="IP pool with display name %s created." % module.params['display_name']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(ip_pool_params)), id=pool_id) ip_pool_params['_revision']=revision # update current revision request_data = json.dumps(ip_pool_params) id = pool_id try: (rc, resp) = request(manager_url+ '/pools/ip-pools/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update ip pool with id %s. Request body [%s]. Error[%s]." 
% (id, request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="ip pool with pool id %s updated." % id) elif state == 'absent': # delete the array id = pool_id if id is None: module.exit_json(changed=False, msg='No ip pool exist with display name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(ip_pool_params)), id=id) try: (rc, resp) = request(manager_url + "/pools/ip-pools/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete ip pool with id %s. Error[%s]." % (id, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=id, message="ip pool with pool id %s deleted." % id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_ip_pools_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_ip_pools_facts short_description: List IP Pools description: Returns information about the configured IP address pools. Information includes the display name and description of the pool and the details of each of the subnets in the pool, including the DNS servers, allocation ranges, gateway, and CIDR subnet address. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str ''' EXAMPLES = ''' - name: List IP Pools nsxt_ip_pools_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils.urls import open_url, fetch_url from ansible.module_utils._text import to_native from ansible.module_utils.six.moves.urllib.error import HTTPError def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/pools/ip-pools', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception 
as err: module.fail_json(msg='Error accessing list of ip pools. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_licenses.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_licenses short_description: 'Add a new license key' description: "This will add a license key to the system. The API supports adding only one license key for each license edition type - Standard, Advanced or Enterprise. If a new license key is tried to add for an edition for which the license key already exists, then this API will return an error." version_added: '2.7' author: 'Rahul Raghuvanshi' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' 
required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str license_key: description: 'license key' no_log: 'True' required: true type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true ''' EXAMPLES = ''' - name: Add license nsxt_licenses: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False license_key: "11111-22222-33333-44444-55555" state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def get_license_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def check_license_exist(module, manager_url, mgr_username, mgr_password, validate_certs): id = module.params['license_key'] try: (rc, resp) = request(manager_url+ '/licenses/%s' % id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: return False return True def main(): argument_spec = vmware_argument_spec() argument_spec.update(license_key=dict(required=True, type='str', no_log=True), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) license_params = get_license_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs 
= module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' request_data = json.dumps(license_params) if state == 'present': # add the license if check_license_exist(module, manager_url, mgr_username, mgr_password, validate_certs): module.exit_json(changed=False, message="license with license key %s already exist."% module.params['license_key']) if module.check_mode: module.exit_json(changed=True, debug_out=str(request_data), id=module.params['license_key']) try: (rc, resp) = request(manager_url+ '/licenses', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add license. Request body [%s]. Error[%s]." % (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, result=resp, message="license with license key %s created." % module.params['license_key']) elif state == 'absent': # delete the license key id = module.params['license_key'] if module.check_mode: module.exit_json(changed=True, debug_out=str(request_data), id=id) try: (rc, resp) = request(manager_url+ '/licenses/' + id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to delete license with id %s. Error[%s]." % (id, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=id, message="license with license key %s deleted." % id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_licenses_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_licenses_facts short_description: Get all licenses description: Returns all licenses. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str ''' EXAMPLES = ''' - name: Get all licenses nsxt_licenses_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils.urls import open_url, fetch_url from ansible.module_utils._text import to_native from ansible.module_utils.six.moves.urllib.error import HTTPError def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/licenses', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing licenses. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_local_manager_registration.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2021 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_local_manager_registration short_description: 'Register a local manager with the global manager' description: "Registers a local manager with the global manager" version_added: '3.2' author: 'Kaushik Lele' options: hostname: description: 'Deployed NSX Global manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str display_name: description: 'Identifier to use when displaying entity in logs or GUI. Defaults to ID if not set' required: false type: str description: description: 'Description of this resource' required: false type: str fail_if_rtt_exceeded: description: 'Fail onboarding if maximum RTT exceeded.' required: false type: bool id: description: 'Unique identifier of this resource' required: true type: str maximum_rtt: description: "Maximum acceptable packet round trip time (RTT). If provided and fail_if_rtt_exceeded is true, onboarding of the site will fail if measured RTT is greater than this value. Minimum: 0 Maximum: 1000 Default: 250" required: false type: int site_connection_info: description: Site connecion info (fqdn, password...) 
def get_local_manager_params(args=None):
    """Build the sites API payload from the module parameters.

    Removes connection/control arguments and any parameter left at None,
    then wraps site_connection_info in a list because the NSX API models
    it as an array while the module accepts a single dict.

    :param args: mutable copy of module.params; mutated in place.
    :return: the pruned payload dict.
    """
    args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs']
    for key in args_to_remove:
        args.pop(key, None)
    for key, value in args.copy().items():
        if value is None:
            args.pop(key, None)
    # site_connection_info is an array in the API payload; wrap only when
    # present so an omitted (required=False) value no longer raises KeyError.
    if "site_connection_info" in args:
        args["site_connection_info"] = [args["site_connection_info"]]
    return args


def get_local_managers(module, url, mgr_username, mgr_password, validate_certs):
    """Return the GET response of the sites endpoint (all registered local managers)."""
    try:
        (rc, resp) = request(url, headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing local manager. Error [%s]' % (to_native(err)))
    return resp


def get_local_manager_by_id(module, url, mgr_username, mgr_password, validate_certs, id):
    """Return the registered local manager dict with the given id, or None if absent."""
    local_managers = get_local_managers(module, url, mgr_username, mgr_password, validate_certs)
    for local_manager in local_managers['results']:
        if local_manager.get('id') == id:
            return local_manager
    return None


def check_for_update(module, url, mgr_username, mgr_password, validate_certs, local_manager_params):
    """Return True when the local manager is already registered but its
    connection info (fqdn or thumbprint) differs from the requested params.

    Returns False when the manager does not exist yet (create path).
    """
    existing = get_local_manager_by_id(module, url, mgr_username, mgr_password,
                                       validate_certs, local_manager_params['id'])
    if existing is None:
        return False
    existing_info = existing['site_connection_info'][0]
    desired_info = local_manager_params['site_connection_info'][0]
    # Bug fix: use .get() so an omitted optional thumbprint on either side
    # no longer raises KeyError.
    return (existing_info.get('fqdn') != desired_info.get('fqdn') or
            existing_info.get('thumbprint') != desired_info.get('thumbprint'))


def main():
    """Create, update, or delete a local manager registration on the global manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(display_name=dict(required=True, type='str'),
                         id=dict(required=True, type='str'),
                         site_connection_info=dict(required=False, type='dict', no_log=True,
                                                   username=dict(required=False, type='str'),
                                                   password=dict(required=False, type='str'),
                                                   thumbprint=dict(required=False, type='str'),
                                                   fqdn=dict(required=True, type='str')),
                         fail_if_rtt_exceeded=dict(required=False, type='bool'),
                         maximum_rtt=dict(required=False, type='int'),
                         state=dict(required=True, choices=['present', 'absent']))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    local_manager_params = get_local_manager_params(module.params.copy())
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    id = module.params['id']
    manager_url = 'https://{}/global-manager/api/v1'.format(mgr_hostname)
    sites_api_url = manager_url + '/global-infra/sites/'

    local_manager_dict = get_local_manager_by_id(module, sites_api_url, mgr_username,
                                                 mgr_password, validate_certs, id)
    local_manager_id, revision = None, None
    if local_manager_dict:
        local_manager_id = local_manager_dict['id']
        revision = local_manager_dict['_revision']

    if state == 'present':
        headers = dict(Accept="application/json")
        headers['Content-Type'] = 'application/json'
        updated = check_for_update(module, sites_api_url, mgr_username, mgr_password,
                                   validate_certs, local_manager_params)

        if not updated:
            # Create path: register (PUT) the local manager, unless one with
            # this id already exists unchanged.
            request_data = json.dumps(local_manager_params)
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(request_data), id='12345')
            try:
                if local_manager_id:
                    module.exit_json(changed=False, id=local_manager_id,
                                     message="Local manager with id %s already exist." % module.params['id'])
                (rc, resp) = request(sites_api_url + '%s' % module.params['id'], data=request_data,
                                     headers=headers, method='PUT',
                                     url_username=mgr_username, url_password=mgr_password,
                                     validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to add local_manager. Request body [%s]. Error[%s]." %
                                 (request_data, to_native(err)))
            # Bug fix: report the id that was just registered instead of None.
            module.exit_json(changed=True, id=module.params['id'], body=str(resp),
                             message="Local manager with id %s created." % module.params['id'])
        else:
            # Update path: PATCH the existing registration with the current revision.
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(json.dumps(local_manager_params)),
                                 id=local_manager_id)
            local_manager_params['_revision'] = revision  # update current revision
            request_data = json.dumps(local_manager_params)
            id = local_manager_id
            try:
                (rc, resp) = request(sites_api_url + '%s' % id, data=request_data,
                                     headers=headers, method='PATCH',
                                     url_username=mgr_username, url_password=mgr_password,
                                     validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to update local_manager with id %s. Request body [%s]. Error[%s]."
                                 % (id, request_data, to_native(err)))
            # Bug fix: report the updated id instead of None.
            module.exit_json(changed=True, id=id, body=str(resp),
                             message="Local manager with id %s updated." % id)

    elif state == 'absent':
        # Delete path.
        id = local_manager_id
        if id is None:
            # Bug fix: local id is None here, so report the requested id.
            module.exit_json(changed=False,
                             msg='No local manager exist with id %s' % module.params['id'])
        if module.check_mode:
            module.exit_json(changed=True, debug_out=str(json.dumps(local_manager_params)), id=id)
        try:
            (rc, resp) = request(sites_api_url + "%s" % id, method='DELETE',
                                 url_username=mgr_username, url_password=mgr_password,
                                 validate_certs=validate_certs)
        except Exception as err:
            module.fail_json(msg="Failed to delete local manager with id %s. Error[%s]." % (id, to_native(err)))
        module.exit_json(changed=True, id=id, message="Local manager with id %s deleted." % id)


if __name__ == '__main__':
    main()
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_local_managers_compatibility short_description: 'Checks the compatibility of a local manager for registration with a global manager' description: "Checks the compatibility of a local manager for registration with a global manager" version_added: '3.2' author: 'Kaushik Lele' options: hostname: description: 'Deployed NSX Global manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str site_connection_info: description: 'Site Connection ofno (fqdn, password...' fqdn: description: 'IP address or hostname of local manager' required: true type: str password: description: "Password for the user" no_log: 'True' required: false type: str required: false thumbprint: description: 'Thumbprint of local manager in the form of a SHA-256 hash represented in lower case HEX' no_log: 'True' required: false type: str username: description: 'Username value of the local manager' required: false type: str ''' EXAMPLES = ''' - name: Checks the compatibility of a local manager for registration with a global manager nsxt_local_managers_compatibility: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False site_connection_info: fqdn: "10.161.244.213" username: "admin" password: "Admin!23" thumbprint: "1a4eeaef05ad711c84d688cfb72001d17a4965a963611d9af63fb86ff55276cf" ''' RETURN = ''' version_compatible: description: Specifies whether local manager version is compatible with global manager. 
def get_local_manager_params(args=None):
    """Strip connection/control arguments and None-valued parameters,
    leaving only the payload fields for the compatibility-check API.

    :param args: mutable copy of module.params; mutated in place.
    :return: the pruned payload dict.
    """
    args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs']
    for key in args_to_remove:
        args.pop(key, None)
    for key, value in args.copy().items():
        if value is None:
            args.pop(key, None)
    return args


def main():
    """POST the site connection info to the global manager's
    onboarding-check-compatibility endpoint and return its response
    (e.g. version_compatible) as module facts."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(site_connection_info=dict(required=False, type='dict', no_log=True,
                                                   username=dict(required=False, type='str'),
                                                   password=dict(required=False, type='str'),
                                                   thumbprint=dict(required=False, type='str'),
                                                   fqdn=dict(required=True, type='str')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    local_manager_params = get_local_manager_params(module.params.copy())

    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    manager_url = 'https://{}/global-manager/api/v1'.format(mgr_hostname)
    # Typo fix: local variable was spelled check_copmatibility_api_url.
    check_compatibility_api_url = manager_url + '/global-infra/onboarding-check-compatibility'

    headers = dict(Accept="application/json")
    headers['Content-Type'] = 'application/json'
    # Bug fix: site_connection_info is declared required=False, so omitting it
    # previously crashed with an unhandled KeyError; fail with a clear message.
    site_connection_info = local_manager_params.get('site_connection_info')
    if site_connection_info is None:
        module.fail_json(msg='site_connection_info is required to check local manager compatibility')
    request_data = json.dumps(site_connection_info)
    try:
        (rc, resp) = request(check_compatibility_api_url, data=request_data, headers=headers,
                             method='POST', url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing local manager. Error [%s]' % (to_native(err)))
    module.exit_json(changed=False, **resp)


if __name__ == '__main__':
    main()
def main():
    """Entry point for the nsxt_local_managers_facts module.

    Lists every local manager registered with the global manager via a
    read-only GET of the global-infra sites endpoint and returns the raw
    API response as module facts (changed is always False).
    """
    module = AnsibleModule(argument_spec=vmware_argument_spec(),
                           supports_check_mode=True)
    params = module.params
    manager_url = 'https://{}/global-manager/api/v1'.format(params['hostname'])
    sites_api_url = manager_url + '/global-infra/sites/'
    try:
        # Read-only request; never mutates state on the global manager.
        (rc, resp) = request(sites_api_url,
                             headers=dict(Accept='application/json'),
                             url_username=params['username'],
                             url_password=params['password'],
                             validate_certs=params['validate_certs'],
                             ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing local managers. Error [%s]' % (to_native(err)))
    module.exit_json(changed=False, **resp)


if __name__ == '__main__':
    main()
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_logical_ports short_description: Create a Logical Port description: "Creates a new logical switch port. The required parameters are the associated logical_switch_id and admin_state (UP or DOWN). Optional parameters are the attachment and switching_profile_ids. If you don't specify switching_profile_ids, default switching profiles are assigned to the port. If you don't specify an attachment, the switch port remains empty. To configure an attachment, you must specify an id, and optionally you can specify an attachment_type (VIF or LOGICALROUTER). The attachment_type is VIF by default." version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str address_bindings: description: 'Address bindings for logical port' required: false type: array of PacketAddressClassifier admin_state: description: Represents Desired state of the logical port required: true type: str attachment: attachment_type: description: Indicates the type of logical port attachment. 
By default it is Virtual Machine interface (VIF) required: false type: str context: allocate_addresses: description: "A flag to indicate whether to allocate addresses from allocation pools bound to the parent logical switch." required: false type: str app_id: description: "An application ID used to identify / look up a child VIF behind a parent VIF. Only effective when vif_type is CHILD." required: false type: str description: Attachment Context parent_vif_id: description: VIF ID of the parent VIF if vif_type is CHILD required: false type: str required: false resource_type: description: "The type of this resource" required: true type: str traffic_tag: description: "Current we use VLAN id as the traffic tag. Only effective when vif_type is CHILD. Each logical port inside a container must have a unique traffic tag. If the traffic_tag is not unique, no error is generated, but traffic will not be delivered to any port with a non-unique tag." required: false type: int transport_node_name: description: name of the transport node that observed a traceflow packet required: false type: str tunnel_id: description: Tunnel Id to uniquely identify the extension. required: true type: int type: dict vif_type: description: Type of the VIF attached to logical port required: true type: str description: Logical port attachment id: description: unique id required: true type: str required: false type: dict display_name: description: Display name required: true type: str extra_configs: description: 'This property could be used for vendor specific configuration in key value string pairs. Logical port setting will override logical switch setting if the same key was set on both logical switch and logical port.' required: false type: array of ExtraConfig ignore_address_bindings: description: 'IP Discovery module uses various mechanisms to discover address bindings being used on each port. 
def get_logical_port_params(args=None):
    """Strip connection/control arguments and None-valued parameters from
    module.params, leaving only the logical-port payload fields.

    :param args: mutable copy of module.params; mutated in place.
    :return: the pruned payload dict.
    """
    args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs']
    for key in args_to_remove:
        args.pop(key, None)
    for key, value in args.copy().items():
        if value is None:
            args.pop(key, None)
    return args


def get_logical_ports(module, manager_url, mgr_username, mgr_password, validate_certs):
    """Return the GET response of /logical-ports (all configured ports)."""
    try:
        (rc, resp) = request(manager_url + '/logical-ports',
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing logical ports. Error [%s]' % (to_native(err)))
    return resp


def get_logical_port_from_display_name(module, manager_url, mgr_username, mgr_password,
                                       validate_certs, display_name):
    """Return the logical port dict whose display_name matches, or None."""
    logical_ports = get_logical_ports(module, manager_url, mgr_username, mgr_password,
                                      validate_certs)
    if logical_ports and len(logical_ports['results']) > 0:
        for logical_port in logical_ports['results']:
            if logical_port.get('display_name') == display_name:
                return logical_port
    return None


def get_transport_nodes(module, manager_url, mgr_username, mgr_password, validate_certs):
    """Return the GET response of /transport-nodes.

    NOTE(review): unused by main() (update_params_with_id goes through
    get_id_from_display_name instead); kept for backward compatibility.
    """
    try:
        (rc, resp) = request(manager_url + '/transport-nodes',
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing transport nodes. Error [%s]' % (to_native(err)))
    return resp


def get_tn_from_display_name(module, manager_url, mgr_username, mgr_password,
                             validate_certs, display_name):
    """Return the transport node dict whose display_name matches, or None.

    NOTE(review): unused by main(); kept for backward compatibility.
    """
    transport_nodes = get_transport_nodes(module, manager_url, mgr_username, mgr_password,
                                          validate_certs)
    for transport_node in transport_nodes['results']:
        if transport_node.get('display_name') == display_name:
            return transport_node
    return None


def get_id_from_display_name(module, manager_url, mgr_username, mgr_password,
                             validate_certs, endpoint, display_name):
    """Resolve a display name to its NSX id via the given list endpoint.

    Fails the module when no object with that display name exists.
    """
    try:
        (rc, resp) = request(manager_url + endpoint,
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing id for display name %s. Error [%s]' %
                         (display_name, to_native(err)))
    for result in resp['results']:
        if result.get('display_name') == display_name:
            return result['id']
    module.fail_json(msg='No id exists with display name %s' % display_name)


def update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs,
                          logical_port_params):
    """Replace the name-based parameters (logical switch, switching profiles,
    transport node) with the id-based fields the API expects."""
    logical_port_params['logical_switch_id'] = get_id_from_display_name(
        module, manager_url, mgr_username, mgr_password, validate_certs,
        '/logical-switches', logical_port_params.pop('logical_switch_name', None))
    host_switch_profile_ids = []
    host_switch_profiles = logical_port_params.pop('switching_profiles', None)
    if host_switch_profiles:
        for host_switch_profile in host_switch_profiles:
            profile_obj = {}
            profile_obj['value'] = get_id_from_display_name(
                module, manager_url, mgr_username, mgr_password, validate_certs,
                "/switching-profiles", host_switch_profile['name'])
            profile_obj['key'] = host_switch_profile['type']
            host_switch_profile_ids.append(profile_obj)
        logical_port_params['switching_profile_ids'] = host_switch_profile_ids
    if logical_port_params.get('attachment', {}).get('context', {}).get('transport_node_name'):
        logical_port_params['attachment']['context']['transport_node_uuid'] = \
            get_id_from_display_name(module, manager_url, mgr_username, mgr_password,
                                     validate_certs, '/transport-nodes',
                                     logical_port_params['attachment']['context']['transport_node_name'])
    return logical_port_params


def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs,
                     logical_port_with_ids):
    """Return True when a port with this display_name exists but its
    attachment, switching profiles, or admin_state differ from the request.

    Returns False when the port does not exist yet (create path).
    """
    existing_logical_port = get_logical_port_from_display_name(
        module, manager_url, mgr_username, mgr_password, validate_certs,
        logical_port_with_ids['display_name'])
    if existing_logical_port is None:
        return False
    existing_att = existing_logical_port.get('attachment')
    desired_att = logical_port_with_ids.get('attachment')
    if existing_att and desired_att and \
            'attachment_type' in existing_att and 'attachment_type' in desired_att:
        # Bug fix: .get('id') avoids a KeyError when either attachment
        # omits the optional id field.
        if existing_att['attachment_type'] != desired_att['attachment_type'] or \
                existing_att.get('id') != desired_att.get('id'):
            return True
    if 'switching_profile_ids' in existing_logical_port and \
            'switching_profile_ids' in logical_port_with_ids and \
            existing_logical_port['switching_profile_ids'] != logical_port_with_ids['switching_profile_ids']:
        return True
    if existing_logical_port['admin_state'] != logical_port_with_ids['admin_state']:
        return True
    return False


def main():
    """Create, update, or delete a logical switch port on the NSX manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(display_name=dict(required=True, type='str'),
                         logical_switch_name=dict(required=True, type='str'),
                         init_state=dict(required=False, type='str'),
                         switching_profiles=dict(required=False, type='list'),
                         attachment=dict(required=False, type='dict',
                                         attachment_type=dict(required=False, type='str'),
                                         context=dict(required=False, type='dict',
                                                      tunnel_id=dict(required=True, type='int'),
                                                      vif_type=dict(required=True, type='str'),
                                                      parent_vif_id=dict(required=False, type='str'),
                                                      traffic_tag=dict(required=False, type='int'),
                                                      app_id=dict(required=False, type='str'),
                                                      allocate_addresses=dict(required=False, type='str'),
                                                      resource_type=dict(required=True, type='str'),
                                                      transport_node_name=dict(required=False, type='str')),
                                         id=dict(required=True, type='str')),
                         admin_state=dict(required=True, type='str'),
                         extra_configs=dict(required=False, type='list'),
                         address_bindings=dict(required=False, type='list'),
                         ignore_address_bindings=dict(required=False, type='list'),
                         description=dict(required=False, type='str'),
                         tags=dict(required=False, type='list'),
                         state=dict(required=True, choices=['present', 'absent']))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    logical_port_params = get_logical_port_params(module.params.copy())
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    display_name = module.params['display_name']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)

    lport_dict = get_logical_port_from_display_name(module, manager_url, mgr_username,
                                                    mgr_password, validate_certs, display_name)
    lport_id, revision = None, None
    if lport_dict:
        lport_id = lport_dict['id']
        revision = lport_dict['_revision']

    if state == 'present':
        body = update_params_with_id(module, manager_url, mgr_username, mgr_password,
                                     validate_certs, logical_port_params)
        updated = check_for_update(module, manager_url, mgr_username, mgr_password,
                                   validate_certs, body)
        headers = dict(Accept="application/json")
        headers['Content-Type'] = 'application/json'

        if not updated:
            # Create path: POST a new port, unless one with this display_name
            # already exists unchanged.
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(json.dumps(body)), id='12345')
            request_data = json.dumps(body)
            try:
                if lport_id:
                    module.exit_json(changed=False, id=lport_id,
                                     message="Logical port with display_name %s already exist" % module.params['display_name'])
                (rc, resp) = request(manager_url + '/logical-ports', data=request_data,
                                     headers=headers, method='POST',
                                     url_username=mgr_username, url_password=mgr_password,
                                     validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to add logical port. Request body [%s]. Error[%s]." %
                                 (request_data, to_native(err)))
            # Give the manager time to realize the new port before returning.
            time.sleep(5)
            module.exit_json(changed=True, id=resp["id"], body=str(resp),
                             message="Logical port with displayname %s created." % module.params['display_name'])
        else:
            # Update path: PUT the changed port with the current revision.
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(json.dumps(body)), id=lport_id)
            body['_revision'] = revision  # update current revision
            request_data = json.dumps(body)
            id = lport_id
            try:
                (rc, resp) = request(manager_url + '/logical-ports/%s' % id, data=request_data,
                                     headers=headers, method='PUT',
                                     url_username=mgr_username, url_password=mgr_password,
                                     validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to update logical port with id %s. Request body [%s]. Error[%s]." %
                                 (id, request_data, to_native(err)))
            time.sleep(5)
            module.exit_json(changed=True, id=resp["id"], body=str(resp),
                             message="logical port with id %s updated." % id)

    elif state == 'absent':
        # Delete path.
        id = lport_id
        if id is None:
            module.exit_json(changed=False,
                             msg='No logical port exist with display name %s' % display_name)
        if module.check_mode:
            module.exit_json(changed=True, debug_out=str(json.dumps(logical_port_params)), id=id)
        try:
            (rc, resp) = request(manager_url + "/logical-ports/%s" % id, method='DELETE',
                                 url_username=mgr_username, url_password=mgr_password,
                                 validate_certs=validate_certs)
        except Exception as err:
            module.fail_json(msg="Failed to delete logical port with id %s. Error[%s]." % (id, to_native(err)))
        time.sleep(5)
        # Consistency fix: also return the conventional 'id' key alongside the
        # historical 'object_name' key.
        module.exit_json(changed=True, id=id, object_name=id,
                         message="Logical port with id %s deleted." % id)


if __name__ == '__main__':
    main()
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_logical_ports_facts short_description: List All Logical Ports description: Returns information about all configured logical switch ports. Logical switch ports connect to VM virtual network interface cards (NICs). Each logical port is associated with one logical switch. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str ''' EXAMPLES = ''' - name: List All Logical Ports nsxt_logical_ports_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils.urls import open_url, fetch_url from ansible.module_utils._text import to_native from ansible.module_utils.six.moves.urllib.error import HTTPError def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/logical-ports', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing list of logical ports. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_logical_router_ports.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_logical_router_ports short_description: Create a Logical Router Port description: "Creates a logical router port. The required parameters include resource_type (LogicalRouterUpLinkPort, LogicalRouterDownLinkPort, LogicalRouterLinkPort, LogicalRouterLoopbackPort, LogicalRouterCentralizedServicePort); and logical_router_id (the router to which each logical router port is assigned). The service_bindings parameter is optional." version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str admin_state: description: Admin state of port. 
required: false type: str display_name: description: Display name required: true type: str edge_cluster_member_index: description: Member index of the edge node on the cluster required: false type: list enable_netx: description: Port is exclusively used for N-S service insertion required: false type: boolean linked_logical_router_port_id: description: Identifier of connected LogicalRouterLinkPortOnTIER1 of TIER1 logical router is_valid: description: Will be set to false if the referenced NSX resource has been deleted. required: false type: boolean profile_type: description: Profile type of the ServiceConfig required: true type: str required: false selected: description: Set to true if this resource has been selected to be acted upon required: true type: boolean service: alg: description: "The Application Layer Gateway (ALG) protocol. Please note, protocol NBNS_BROADCAST and NBDG_BROADCAST are deprecated.Please use UDP protocol and create L4 Port Set type of service instead." required: true type: str description: Service which registered the ip. destination_ports: description: Destination ports required: false type: list ether_type: description: Type of the encapsulated protocol required: true type: int icmp_code: description: 'Code of the IPv4 ICMP message.' required: false type: int icmp_type: description: ICMP message type required: false type: int l4_protocol: description: L4 protocol required: true type: str protocol: description: Export protocol required: true type: str protocol_number: description: The IP protocol number required: true type: int required: false resource_type: description: "LogicalRouterUpLinkPort is allowed only on TIER0 logical router. It is the north facing port of the logical router. LogicalRouterLinkPortOnTIER0 is allowed only on TIER0 logical router. This is the port where the LogicalRouterLinkPortOnTIER1 of TIER1 logical router connects to. LogicalRouterLinkPortOnTIER1 is allowed only on TIER1 logical router. 
This is the port using which the user connected to TIER1 logical router for upwards connectivity via TIER0 logical router. Connect this port to the LogicalRouterLinkPortOnTIER0 of the TIER0 logical router. LogicalRouterDownLinkPort is for the connected subnets on the logical router. LogicalRouterLoopbackPort is a loopback port for logical router component which is placed on c hosen edge cluster member. LogicalRouterIPTunnelPort is a IPSec VPN tunnel port created on logical router when route based VPN session configured. LogicalRouterCentralizedServicePort is allowed only on Active/Standby TIER0 and TIER1 logical router. Port can be connected to VLAN or overlay logical switch. Unlike downlink port it does not participate in distributed routing and hosted on all edge cluster members associated with logical router. Stateful services can be applied on this port." required: true type: str source_ports: description: Source ports required: false type: list type: dict target_display_name: description: Display name of the NSX resource. required: false type: str target_id: description: Identifier of the NSX resource. required: false type: str target_type: description: Type of the Policy object corresponding to the source type (e.g. Segment). required: false type: str type: dict linked_logical_switch_port_id: description: Reference to the logical switch port to connect to is_valid: description: Will be set to false if the referenced NSX resource has been deleted. required: false type: boolean profile_type: description: Profile type of the ServiceConfig required: true type: str required: false selected: description: Set to true if this resource has been selected to be acted upon required: true type: boolean service: alg: description: "The Application Layer Gateway (ALG) protocol. Please note, protocol NBNS_BROADCAST and NBDG_BROADCAST are deprecated. Please use UDP protocol and create L4 Port Set type of service instead." 
required: true type: str description: Service which registered the ip. destination_ports: description: Destination ports required: false type: list ether_type: description: Type of the encapsulated protocol required: true type: int icmp_code: description: 'Code of the IPv4 ICMP message.' required: false type: int icmp_type: description: ICMP Type required: false type: int l4_protocol: description: L4 Protocol required: true type: str protocol: description: Export protocol required: true type: str protocol_number: description: The IP protocol number required: true type: int required: false resource_type: description: "LogicalRouterUpLinkPort is allowed only on TIER0 logical router. It is the north facing port of the logical router. LogicalRouterLinkPortOnTIER0 is allowed only on TIER0 logical router. This is the port where the LogicalRouterLinkPortOnTIER1 of TIER1 logical router connects to. LogicalRouterLinkPortOnTIER1 is allowed only on TIER1 logical router. This is the port using which the user connected to TIER1 logical router for upwards connectivity via TIER0 logical router. Connect this port to the LogicalRouterLinkPortOnTIER0 of the TIER0 logical router. LogicalRouterDownLinkPort is for the connected subnets on the logical router. LogicalRouterLoopbackPort is a loopback port for logical router component which is placed on c hosen edge cluster member. LogicalRouterIPTunnelPort is a IPSec VPN tunnel port created on logical router when route based VPN session configured. LogicalRouterCentralizedServicePort is allowed only on Active/Standby TIER0 and TIER1 logical router. Port can be connected to VLAN or overlay logical switch. Unlike downlink port it does not participate in distributed routing and hosted on all edge cluster members associated with logical router. Stateful services can be applied on this port." 
required: true type: str source_ports: description: Source ports required: false type: list type: dict target_display_name: description: Display name of the NSX resource. required: false type: str target_id: description: Identifier of the NSX resource. required: false type: str target_type: description: Type of the Policy object corresponding to the source type (e.g. Segment). required: false type: str type: dict logical_router_name: description: Name of the logical router required: true type: str mac_address: description: MAC address required: false type: str mtu: description: 'Maximum transmission unit specifies the size of the largest packet that a network protocol can transmit. If not specified, the global logical MTU set in the /api/v1/global-configs/RoutingGlobalConfig API will be used.' required: false type: int resource_type: description: "LogicalRouterUpLinkPort is allowed only on TIER0 logical router. It is the north facing port of the logical router. LogicalRouterLinkPortOnTIER0 is allowed only on TIER0 logical router. This is the port where the LogicalRouterLinkPortOnTIER1 of TIER1 logical router connects to. LogicalRouterLinkPortOnTIER1 is allowed only on TIER1 logical router. This is the port using which the user connected to TIER1 logical router for upwards connectivity via TIER0 logical router. Connect this port to the LogicalRouterLinkPortOnTIER0 of the TIER0 logical router. LogicalRouterDownLinkPort is for the connected subnets on the logical router. LogicalRouterLoopbackPort is a loopback port for logical router component which is placed on c hosen edge cluster member. LogicalRouterIPTunnelPort is a IPSec VPN tunnel port created on logical router when route based VPN session configured. LogicalRouterCentralizedServicePort is allowed only on Active/Standby TIER0 and TIER1 logical router. Port can be connected to VLAN or overlay logical switch. 
Unlike downlink port it does not participate in distributed routing and hosted on all edge cluster members associated with logical router. Stateful services can be applied on this port." required: true type: str service_bindings: description: Service Bindings required: false type: array of ServiceBinding state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true subnets: description: Logical router port subnets required: false type: array of IPSubnet urpf_mode: description: Unicast Reverse Path Forwarding mode required: false type: str vpn_session_id: description: Associated VPN session identifier. required: false type: str description: description: Description of the resou required: False type: 'str' ndra_profile_id: description: NDRA Profile id required: False type: 'str' enable_multicast: description: Flag to enable/disable Multicast required: False type: 'bool' routing_policies: description: Routing policies used to specify how the traffic, which matches the policy routes, will be processed. required: False type: 'list' ndra_prefix_config: description: Configuration to override the neighbor discovery router advertisement prefix time parameters at the subnet level. Note that users are allowed to override the prefix time only for IPv6 subnets which are configured on the port. 
required: False type: 'list' pim_config: description: PIM configuration parameters required: False type: 'dict' enabled: description: Flag to enable/disable PIM required: False type: 'bool' default: False tags: description: Opaque identifiers meaningful to the API user required: False type: list ''' EXAMPLES = ''' - name: Create a Logical Router Port nsxt_logical_routers_ports: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False resource_type: LogicalRouterDownLinkPort logical_router_name: "lr-1" linked_logical_switch_port_id: target_type: LogicalPort target_id: "18691381-b08f-4d90-8c0c-98d0e449b141" subnets: - ip_addresses: - "172.16.40.1" prefix_length: 24 state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def get_logical_router_port_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_logical_router_ports(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/logical-router-ports', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing logical router ports. 
Error [%s]' % (to_native(err))) return resp def get_lr_port_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): logical_router_ports = get_logical_router_ports(module, manager_url, mgr_username, mgr_password, validate_certs) for logical_router_port in logical_router_ports['results']: if logical_router_port.__contains__('display_name') and logical_router_port['display_name'] == display_name: return logical_router_port return None def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name, exit_if_not_found=True): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] if exit_if_not_found: module.fail_json(msg='No id exist with display name %s' % display_name) def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_port_params ): logical_router_port_params['logical_router_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, '/logical-routers', logical_router_port_params.pop('logical_router_name', None)) if logical_router_port_params.__contains__('linked_logical_switch_port_id') and \ logical_router_port_params['linked_logical_switch_port_id'].__contains__('target_display_name'): if not logical_router_port_params['linked_logical_switch_port_id'].__contains__('target_id'): logical_router_port_params['linked_logical_switch_port_id']['target_id'] = get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, '/logical-ports', 
                logical_router_port_params['linked_logical_switch_port_id']['target_display_name'])
    return logical_router_port_params


def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_port_params):
    # Decide whether an existing port with the same display_name differs from the
    # desired params. Returns False when no such port exists (a create is needed
    # instead) or when nothing relevant changed; True triggers a PUT update.
    existing_lr_port = get_lr_port_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_port_params['display_name'])
    if existing_lr_port is None:
        return False
    # 'description' differs in value, or is present on exactly one side.
    if existing_lr_port.__contains__('description') and logical_router_port_params.__contains__('description') and existing_lr_port['description'] != logical_router_port_params['description']:
        return True
    if existing_lr_port.__contains__('description') and not logical_router_port_params.__contains__('description'):
        return True
    if not existing_lr_port.__contains__('description') and logical_router_port_params.__contains__('description'):
        return True
    if existing_lr_port['resource_type'] != logical_router_port_params['resource_type']:
        return True
    if existing_lr_port['logical_router_id'] != logical_router_port_params['logical_router_id']:
        return True
    # service_bindings / subnets are only compared when present on BOTH sides;
    # a one-sided presence does NOT trigger an update (unlike 'description').
    if existing_lr_port.__contains__('service_bindings') and logical_router_port_params.__contains__('service_bindings') and existing_lr_port['service_bindings'] != logical_router_port_params['service_bindings']:
        return True
    if existing_lr_port.__contains__('subnets') and logical_router_port_params.__contains__('subnets') and existing_lr_port['subnets'] != logical_router_port_params['subnets']:
        return True
    return False


def main():
    # Entry point: create, update, or delete a logical router port depending on
    # 'state' and whether a port with the given display_name already exists.
    argument_spec = vmware_argument_spec()
    argument_spec.update(display_name=dict(required=True, type='str'),
                         description=dict(required=False, type='str'),
                         ndra_profile_id=dict(required=False, type='str'),
                         enable_multicast=dict(required=False, type='bool'),
                         routing_policies=dict(required=False, type='list'),
                         ndra_prefix_config=dict(required=False, type='list'),
                         pim_config=dict(required=False, type='dict',
                                         enabled=dict(required=False, type='bool', default=False)),
                         subnets=dict(required=False, type='list'),
                         urpf_mode=dict(required=False, type='str'),
                         mac_address=dict(required=False, type='str'),
                         linked_logical_switch_port_id=dict(required=False, type='dict',
                                                            profile_type=dict(required=True, type='str'),
                                                            selected=dict(required=True, type='bool'),
                                                            service=dict(required=False, type='dict',
                                                                         ether_type=dict(required=True, type='int'),
                                                                         destination_ports=dict(required=False, type='list'),
                                                                         l4_protocol=dict(required=True, type='str'),
                                                                         source_ports=dict(required=False, type='list'),
                                                                         icmp_code=dict(required=False, type='int'),
                                                                         icmp_type=dict(required=False, type='int'),
                                                                         protocol=dict(required=True, type='str'),
                                                                         protocol_number=dict(required=True, type='int'),
                                                                         alg=dict(required=True, type='str'),
                                                                         resource_type=dict(required=True, type='str')),
                                                            target_display_name=dict(required=False, type='str'),
                                                            is_valid=dict(required=False, type='bool'),
                                                            target_id=dict(required=False, type='str'),
                                                            target_type=dict(required=False, type='str')),
                         admin_state=dict(required=False, type='str'),
                         vpn_session_id=dict(required=False, type='str'),
                         enable_netx=dict(required=False, type='bool'),
                         edge_cluster_member_index=dict(required=False, type='list'),
                         mtu=dict(required=False, type='int'),
                         linked_logical_router_port_id=dict(required=False, type='dict',
                                                            profile_type=dict(required=True, type='str'),
                                                            selected=dict(required=True, type='bool'),
                                                            service=dict(required=False, type='dict',
                                                                         ether_type=dict(required=True, type='int'),
                                                                         destination_ports=dict(required=False, type='list'),
                                                                         l4_protocol=dict(required=True, type='str'),
                                                                         source_ports=dict(required=False, type='list'),
                                                                         icmp_code=dict(required=False, type='int'),
                                                                         icmp_type=dict(required=False, type='int'),
                                                                         protocol=dict(required=True, type='str'),
                                                                         protocol_number=dict(required=True, type='int'),
                                                                         alg=dict(required=True, type='str'),
                                                                         resource_type=dict(required=True, type='str')),
                                                            target_display_name=dict(required=False, type='str'),
                                                            is_valid=dict(required=False, type='bool'),
                                                            target_id=dict(required=False, type='str'),
                                                            target_type=dict(required=False, type='str')),
                         logical_router_name=dict(required=True, type='str'),
                         service_bindings=dict(required=False, type='list'),
                         resource_type=dict(required=True, type='str'),
                         tags=dict(required=False, type='list'),
                         state=dict(required=True, choices=['present', 'absent']))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    logical_router_port_params = get_logical_router_port_params(module.params.copy())
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    display_name = module.params['display_name']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)

    # Look up any existing port by display_name; id/_revision drive update/delete.
    logical_router_port_dict = get_lr_port_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name)
    logical_router_port_id, revision = None, None
    if logical_router_port_dict:
        logical_router_port_id = logical_router_port_dict['id']
        revision = logical_router_port_dict['_revision']

    if state == 'present':
        headers = dict(Accept="application/json")
        headers['Content-Type'] = 'application/json'
        # Resolve display names to ids BEFORE diffing against the existing port.
        body = update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_port_params)
        updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, body)

        if not updated:
            # add the logical_router_port
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(json.dumps(logical_router_port_params)), id='12345')
            request_data = json.dumps(logical_router_port_params)
            try:
                # Port exists but needs no changes: exit idempotently without a POST.
                if logical_router_port_id:
                    module.exit_json(changed=False, id=logical_router_port_id, message="Logical router port with display_name %s already exist."% module.params['display_name'])
                (rc, resp) = request(manager_url+ '/logical-router-ports', data=request_data, headers=headers, method='POST',
                                     url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to add logical router port. Request body [%s]. Error[%s]." % (request_data, to_native(err)))
            time.sleep(5)  # allow the manager to realize the new port before returning
            module.exit_json(changed=True, id=resp["id"], body= str(resp), message="Logical router port with displayname %s created." % module.params['display_name'])
        else:
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(json.dumps(logical_router_port_params)), id=logical_router_port_id)
            logical_router_port_params['_revision'] = revision # update current revision
            request_data = json.dumps(logical_router_port_params)
            id = logical_router_port_id  # NOTE(review): shadows the builtin id() in this branch
            try:
                (rc, resp) = request(manager_url+ '/logical-router-ports/%s' % id, data=request_data, headers=headers, method='PUT',
                                     url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to update logical router port with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err)))
            time.sleep(5)
            module.exit_json(changed=True, id=resp["id"], body= str(resp), message="logical port with id %s updated." % id)

    elif state == 'absent':
        # delete the array
        id = logical_router_port_id
        if id is None:
            module.exit_json(changed=False, msg='No logical router port exist with display name %s' % display_name)
        if module.check_mode:
            module.exit_json(changed=True, debug_out=str(json.dumps(logical_router_port_params)), id=id)
        try:
            (rc, resp) = request(manager_url + "/logical-router-ports/%s" % id, method='DELETE',
                                 url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs)
        except Exception as err:
            module.fail_json(msg="Failed to delete logical router port with id %s. Error[%s]." % (id, to_native(err)))
        time.sleep(5)
        module.exit_json(changed=True, object_name=id, message="Logical router port with id %s deleted."
% id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_logical_router_ports_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_logical_router_ports_facts short_description: List Logical Router Ports description: Returns information about all logical router ports. Information includes the resource_type (LogicalRouterUpLinkPort, LogicalRouterDownLinkPort, LogicalRouterLinkPort, LogicalRouterLoopbackPort, LogicalRouterCentralizedServicePort); logical_router_id (the router to which each logical router port is assigned); and any service_bindings (such as DHCP relay service). The GET request can include a query parameter (logical_router_id or logical_switch_id). version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. 
required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str ''' EXAMPLES = ''' - name: List Logical Router Ports nsxt_logical_router_ports_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/logical-router-ports', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing list of logical ports. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_logical_router_static_routes.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_logical_router_static_routes short_description: Add Static Routes on a Logical Router description: Add Static Routes on a Logical Router version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str id: description: unique id required: false type: str display_name: description: Display name of the resource required: True type: str description: description: Description of the resource required: false type: str tags: description: Opaque identifier meaningful to API user required: false type: Array of Tag logical_router_name: description: Name of the logical router required: false type: str network: description: destination in cidr required: true type: str next_hops: description: Next Hops required: true type: array of StaticRouteNextHop state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." 
required: true ''' EXAMPLES = ''' - name: Add Static Routes on a Logical Router nsxt_logical_router_static_routes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "static_route" logical_router_name: "tier-0" next_hops: - administrative_distance: '2' ip_address: 192.168.200.253 network: 192.168.200.0/24 state: "present" ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def get_body_object(body): if body.__contains__('id'): del body['id'] if body.__contains__('logical_router_id'): del body['logical_router_id'] return body def get_logical_router_static_route_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name, exit_if_not_found=True): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. 
Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] if exit_if_not_found: module.fail_json(msg='No id exist with display name %s' % display_name) def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_static_route_params ): logical_router_static_route_params['logical_router_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, '/logical-routers', logical_router_static_route_params.pop('logical_router_name', None)) return logical_router_static_route_params def get_logical_router_static_routes(module, manager_url, mgr_username, mgr_password, validate_certs,logical_router_id): try: (rc, resp) = request(manager_url+ '/logical-routers/%s/routing/static-routes' % logical_router_id , headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing logical router ports. 
def get_lr_static_route_from_network(module, manager_url, mgr_username, mgr_password, validate_certs,
                                     network, logical_router_id):
    """Return the static route on *logical_router_id* whose 'network' equals *network*, else None."""
    routes = get_logical_router_static_routes(module, manager_url, mgr_username, mgr_password,
                                              validate_certs, logical_router_id)
    for route in routes['results']:
        if 'network' in route and route['network'] == network:
            return route
    return None


def get_lr_static_route_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs,
                                          logical_router_static_route_params):
    """Return the static route whose display_name matches the requested params, else None."""
    routes = get_logical_router_static_routes(module, manager_url, mgr_username, mgr_password,
                                              validate_certs,
                                              logical_router_static_route_params['logical_router_id'])
    wanted_name = logical_router_static_route_params['display_name']
    for route in routes['results']:
        if 'display_name' in route and route['display_name'] == wanted_name:
            return route
    return None


def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs,
                     logical_router_static_route_params):
    """Decide whether an existing static route differs from the requested params.

    Returns True when a route with the same display name exists and any of
    'description', 'next_hops' or 'network' differs (present on one side only,
    or present on both with unequal values); False when no such route exists
    or all three fields agree.
    """
    existing = get_lr_static_route_from_display_name(module, manager_url, mgr_username, mgr_password,
                                                     validate_certs, logical_router_static_route_params)
    if existing is None:
        return False
    for field in ('description', 'next_hops', 'network'):
        present_on_server = field in existing
        present_in_request = field in logical_router_static_route_params
        if present_on_server != present_in_request:
            return True
        if present_on_server and existing[field] != logical_router_static_route_params[field]:
            return True
    return False
logical_router_static_route_params["logical_router_id"] logical_router_static_route_id = get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, '/logical-routers/' + logical_router_id + '/routing/static-routes', display_name, False) logical_router_static_route = get_lr_static_route_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_static_route_params) if logical_router_static_route: revision = logical_router_static_route['_revision'] if state == 'present': headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_static_route_params) if not updated: # add the logical_router_static_route if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(logical_router_static_route_params)), id='12345') request_data = json.dumps(logical_router_static_route_params) try: if logical_router_static_route_id: module.exit_json(changed=False, id=logical_router_static_route_id, message="Logical router static route with network %s already exist."% module.params['network']) (rc, resp) = request(manager_url+ '/logical-routers/%s/routing/static-routes' % logical_router_id, data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add logical router port. Request body [%s]. Error[%s]." % (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="Logical router static route with network %s created." 
% module.params['network']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(logical_router_static_route_params)), id=logical_router_static_route_id) logical_router_static_route_params['_revision'] = revision # update current revision request_data = json.dumps(logical_router_static_route_params) id = logical_router_static_route_id try: (rc, resp) = request(manager_url+ '/logical-routers/%s/routing/static-routes/%s' % (logical_router_id,id), data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update logical router static route with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="logical router static route with id %s updated." % id) elif state == 'absent': if logical_router_static_route_id is None: module.exit_json(changed=False, msg='No logical router static route exist with network %s' % network) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(logical_router_static_route_params)), id=logical_router_static_route_id) try: (rc, resp) = request(manager_url + "/logical-routers/%s/routing/static-routes/%s" % (logical_router_id,logical_router_static_route_id), method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete logical static route with id %s. Error[%s]." % (logical_router_static_route_id, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=logical_router_static_route_id, message="Logical router static route with id %s deleted." 
% logical_router_static_route_id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_logical_routers.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_logical_routers short_description: Create a Logical Router description: Creates a logical router. The required parameters are router_type (TIER0 or TIER1) and edge_cluster_id (TIER0 only). Optional parameters include internal and external transit network addresses. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str advanced_config: description: Contains config properties for tier0 routers external_transit_networks: description: CIDR block defining tier0 to tier1 links required: false type: list ha_vip_configs: description: This configuration can be defined only for Active-Standby LogicalRouter to provide redundancy. For multiple uplink ports, multiple HaVipConfigs must be defined and each config will pair exactly two uplink ports. The VIP will move and will always be owned by the Active node. Note - when HaVipConfig[s] are defined, configuring dynamic-routing is disallowed. required: false type: array of HaVipConfig internal_routing_network: description: Internal Routing Name required: false type: str internal_transit_networks: description: CIDR block defining service router to distributed router links required: false type: list required: false transport_zone_name: description: Name of transport zone required: false type: str type: dict allocation_profile: allocation_pool: allocation_pool_type: description: Types of logical router allocation pool based on services required: true type: str allocation_size: description: "To address varied customer performance and scalability requirements, different sizes for load balancer service are supported: SMALL, MEDIUM and LARGE, each with its own set of resource and performance. Specify size of load balancer service which you will bind to TIER1 router." required: true type: str description: "Logical router allocation can be tracked for specific services and services may have their own hard limits and allocation sizes. For example load balancer pool should be specified if load balancer service will be attached to logical router." required: false type: dict description: 'Configurations options to auto allocate edge cluster members for logical router. Auto allocation is supported only for TIER1 and pick least utilized member post current assignment for next allocation.' 
enable_standby_relocation: description: 'Flag to enable the auto-relocation of standby service router running on edge cluster and node associated with the logical router. Only manually placed service contexts for tier1 logical routers are considered for the relocation.' required: false type: boolean required: false type: dict description: description: Description of the resource required: false type: str display_name: description: Display name required: true type: str edge_cluster_member_indices: description: 'For stateful services, the logical router should be associated with edge cluster. For TIER 1 logical router, for manual placement of service router within the cluster, edge cluster member indices needs to be provided else same will be auto-allocated. You can provide maximum two indices for HA ACTIVE_STANDBY. For TIER0 logical router this property is no use and placement is derived from logical router uplink or loopback port.' required: false type: list edge_cluster_name: description: Name of edge cluster required: false type: str failover_mode: description: 'Determines the behavior when a logical router instance restarts after a failure. If set to PREEMPTIVE, the preferred node will take over, even if it causes another failure. If set to NON_PREEMPTIVE, then the instance that restarted will remain secondary. This property must not be populated unless the high_availability_mode property is set to ACTIVE_STANDBY. If high_availability_mode property is set to ACTIVE_STANDBY and this property is not specified then default will be NON_PREEMPTIVE.' required: false type: str high_availability_mode: description: High availability mode required: false type: str preferred_edge_cluster_member_index: description: Used for tier0 routers only required: false type: int resource_type: choices: - LogicalRouter description: "A Policy Based VPN requires to define protect rules that match local and peer subnets. 
IPSec security associations is negotiated for each pair of local and peer subnet. A Route Based VPN is more flexible, more powerful and recommended over policy based VPN. IP Tunnel port is created and all traffic routed via tunnel port is protected. Routes can be configured statically or can be learned through BGP. A route based VPN is must for establishing redundant VPN session to remote site." required: false type: str router_type: description: Type of Logical Router required: true type: str ipv6_profiles: description: IPv6 Profiles required: false type: dict dad_profile_id: description: DAD profile id required: False type: str ndra_profile_id: description: NDRA profile id required: False type: str tags: description: Opaque identifiers meaningful to the API user required: false type: list state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true ''' EXAMPLES = ''' - name: Create a Logical Router nsxt_logical_routers: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False resource_type: LogicalRouter description: "Router West" display_name: "tier-0" edge_cluster_name: edge-cluster-1 router_type: TIER0 high_availability_mode: ACTIVE_ACTIVE state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def get_logical_router_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_logical_routers(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = 
def get_lr_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name):
    """Return the logical router dict whose display_name matches, or None when absent."""
    logical_routers = get_logical_routers(module, manager_url, mgr_username, mgr_password, validate_certs)
    for logical_router in logical_routers['results']:
        if logical_router.__contains__('display_name') and logical_router['display_name'] == display_name:
            return logical_router
    return None


def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name):
    """Resolve *display_name* to an object id via a list GET on *endpoint*.

    Unlike the static-routes variant, this one always fails the module when
    no object with that display name exists.
    """
    try:
        (rc, resp) = request(manager_url + endpoint, headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err)))
    for result in resp['results']:
        if result.__contains__('display_name') and result['display_name'] == display_name:
            return result['id']
    module.fail_json(msg='No id exists with display name %s' % display_name)


def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_params ):
    """Replace user-facing display names in the params with resolved NSX ids.

    Handles: edge_cluster_name -> edge_cluster_id, ipv6_profiles'
    dad/ndra profile names -> ids, advanced_config's transport_zone_name ->
    transport_zone_id, and each ha_vip_config's redundant_uplink_port_names ->
    redundant_uplink_port_ids. Mutates and returns *logical_router_params*.
    """
    if logical_router_params.__contains__('edge_cluster_name'):
        edge_cluster_name = logical_router_params.pop('edge_cluster_name', None)
        logical_router_params['edge_cluster_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs,
                                                                             "/edge-clusters", edge_cluster_name)
    if logical_router_params.__contains__('ipv6_profiles'):
        if logical_router_params['ipv6_profiles'].__contains__('dad_profile_name'):
            dad_profile_name = logical_router_params['ipv6_profiles'].pop('dad_profile_name')
            logical_router_params['ipv6_profiles']['dad_profile_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs,
                                                                                                 "/ipv6/dad-profiles", dad_profile_name)
        if logical_router_params['ipv6_profiles'].__contains__('ndra_profile_name'):
            ndra_profile_name = logical_router_params['ipv6_profiles'].pop('ndra_profile_name')
            logical_router_params['ipv6_profiles']['ndra_profile_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs,
                                                                                                  "/ipv6/nd-ra-profiles", ndra_profile_name)
    if logical_router_params.__contains__('advanced_config') and logical_router_params['advanced_config'].__contains__('transport_zone_name'):
        transport_zone_name= logical_router_params['advanced_config'].pop('transport_zone_name', None)
        logical_router_params['advanced_config']['transport_zone_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs,
                                                                                                  "/transport-zones", transport_zone_name)
    if logical_router_params.__contains__('advanced_config') and logical_router_params['advanced_config'].__contains__(
            'ha_vip_configs'):
        for i in range(len(logical_router_params['advanced_config']['ha_vip_configs'])):
            ha_vip_config = logical_router_params['advanced_config']['ha_vip_configs'][i]
            # When both names and ids are supplied, the ids are discarded and
            # re-resolved from the names below.
            if ha_vip_config.__contains__('redundant_uplink_port_ids') and ha_vip_config.__contains__(
                    'redundant_uplink_port_names'):
                ha_vip_config.pop('redundant_uplink_port_ids', None)
            # Ids supplied without names: nothing to resolve for this config.
            if ha_vip_config.__contains__('redundant_uplink_port_ids') and not ha_vip_config.__contains__(
                    'redundant_uplink_port_names'):
                continue
            # Neither supplied: nothing to resolve for this config.
            if not ha_vip_config.__contains__('redundant_uplink_port_ids') and not ha_vip_config.__contains__(
                    'redundant_uplink_port_names'):
                continue
            uplink_profiles_names = ha_vip_config['redundant_uplink_port_names']
            ha_vip_config.pop('redundant_uplink_port_names', None)
            uplink_profile_ids = get_id_from_display_name_uplink(module, manager_url, mgr_username, mgr_password, validate_certs,
                                                                 "/logical-router-ports", uplink_profiles_names)
            ha_vip_config['redundant_uplink_port_ids'] = uplink_profile_ids
            logical_router_params['advanced_config']['ha_vip_configs'][i] = ha_vip_config
    return logical_router_params
def get_id_from_display_name_uplink(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint,
                                    uplink_profiles_display_names):
    """Resolve exactly two uplink port display names to their ids.

    Expects *uplink_profiles_display_names* to hold (at least) two names;
    fails the module unless two distinct matching ports are found.
    """
    uplink_profile_display_name1 = uplink_profiles_display_names[0]
    uplink_profile_display_name2 = uplink_profiles_display_names[1]
    try:
        (rc, resp) = request(manager_url + endpoint, headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing id for uplink display names %s, %s. Error [%s]' % (
            uplink_profile_display_name1, uplink_profile_display_name2, to_native(err)))
    uplink_profile_ids = []
    for result in resp['results']:
        if result.__contains__('display_name') and (result['display_name'] == uplink_profile_display_name1 or result[
                'display_name'] == uplink_profile_display_name2):
            uplink_profile_ids.append(result['id'])
    # NOTE(review): the `is None` test is dead code — uplink_profile_ids is
    # always a list here; the length check alone guards the failure path.
    if uplink_profile_ids is None or len(uplink_profile_ids) < 2:
        module.fail_json(msg='No id exists with uplink display name %s, %s' % (
            uplink_profile_display_name1, uplink_profile_display_name2))
    return uplink_profile_ids


def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_with_ids):
    """Decide whether an existing logical router differs from the requested one.

    Returns True when a router with the same display name exists and its
    tags, edge_cluster_id or advanced_config (ha_vip_configs presence and
    redundant uplink port pairings, internal/external transit networks)
    differ from the requested params; False otherwise.
    """
    existing_logical_router = get_lr_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs,
                                                       logical_router_with_ids['display_name'])
    if existing_logical_router is None:
        return False
    # Tags: differ when present on only one side, or present on both but unequal
    # under order-insensitive comparison.
    if existing_logical_router.__contains__('tags') and not logical_router_with_ids.__contains__('tags'):
        return True
    if not existing_logical_router.__contains__('tags') and logical_router_with_ids.__contains__('tags'):
        return True
    if existing_logical_router.__contains__('tags') and logical_router_with_ids.__contains__('tags') and (
            not compareTags(existing_logical_router, logical_router_with_ids)):
        return True
    if existing_logical_router.__contains__('edge_cluster_id') and logical_router_with_ids.__contains__(
            'edge_cluster_id') and existing_logical_router['edge_cluster_id'] != logical_router_with_ids[
            'edge_cluster_id']:
        return True
    # advanced_config: presence mismatch on either side means an update.
    if existing_logical_router.__contains__('advanced_config') and not logical_router_with_ids.__contains__(
            'advanced_config'):
        return True
    if not existing_logical_router.__contains__('advanced_config') and logical_router_with_ids.__contains__(
            'advanced_config'):
        return True
    # ha_vip_configs: presence mismatch or different uplink port pairings.
    if existing_logical_router.__contains__('advanced_config') and logical_router_with_ids.__contains__(
            'advanced_config') and \
            existing_logical_router['advanced_config'].__contains__('ha_vip_configs') and not logical_router_with_ids[
            'advanced_config'].__contains__('ha_vip_configs'):
        return True
    if existing_logical_router.__contains__('advanced_config') and logical_router_with_ids.__contains__(
            'advanced_config') and not \
            existing_logical_router['advanced_config'].__contains__('ha_vip_configs') and logical_router_with_ids[
            'advanced_config'].__contains__('ha_vip_configs'):
        return True
    if existing_logical_router.__contains__('advanced_config') and logical_router_with_ids.__contains__(
            'advanced_config') and \
            existing_logical_router['advanced_config'].__contains__('ha_vip_configs') and logical_router_with_ids[
            'advanced_config'].__contains__('ha_vip_configs') and \
            not checkRedundantUplinkPortIds(existing_logical_router['advanced_config']['ha_vip_configs'],
                                            logical_router_with_ids['advanced_config']['ha_vip_configs']):
        return True
    if existing_logical_router.__contains__('advanced_config') and logical_router_with_ids.__contains__(
            'advanced_config'):
        if existing_logical_router['advanced_config'].__contains__('internal_transit_network') and \
                logical_router_with_ids['advanced_config'].__contains__('internal_transit_network') and \
                existing_logical_router['advanced_config']['internal_transit_network'] != \
                logical_router_with_ids['advanced_config']['internal_transit_network']:
            return True
        if existing_logical_router['advanced_config'].__contains__('external_transit_networks') and \
                logical_router_with_ids['advanced_config'].__contains__('external_transit_networks') and \
                existing_logical_router['advanced_config']['external_transit_networks'] != \
                logical_router_with_ids['advanced_config']['external_transit_networks']:
            return True
        # NOTE(review): this final check duplicates the absent-vs-present
        # ha_vip_configs test performed above; it can never fire first.
        if existing_logical_router['advanced_config'].__contains__('ha_vip_configs') is False and \
                logical_router_with_ids['advanced_config'].__contains__('ha_vip_configs') is True:
            return True
    return False
def main():
    """Ansible entry point: create, update or delete a logical router."""
    argument_spec = vmware_argument_spec()
    # NOTE: the nested dict entries below (allocation_profile, advanced_config,
    # ipv6_profiles) are descriptive only — AnsibleModule does not validate
    # nested specs passed this way (that would require `options=`).
    argument_spec.update(display_name=dict(required=True, type='str'),
                         description=dict(required=False, type='str'),
                         edge_cluster_member_indices=dict(required=False, type='list'),
                         allocation_profile=dict(required=False, type='dict',
                                                 allocation_pool=dict(required=False, type='dict',
                                                                      allocation_size=dict(required=True, type='str'),
                                                                      allocation_pool_type=dict(required=True, type='str')),
                                                 # Fixed invalid Ansible type name: 'boolean' -> 'bool'.
                                                 enable_standby_relocation=dict(required=False, type='bool')),
                         failover_mode=dict(required=False, type='str'),
                         advanced_config=dict(required=False, type='dict',
                                              transport_zone_name=dict(required=False, type='str'),
                                              internal_transit_networks=dict(required=False, type='list'),
                                              internal_routing_network=dict(required=False, type='str'),
                                              ha_vip_configs=dict(required=False, type='list',
                                                                  # Fixed invalid Ansible type name: 'boolean' -> 'bool'.
                                                                  enabled=dict(required=False, type='bool'),
                                                                  ha_vip_subnets=dict(required=False, type='list',
                                                                                      active_vip_addresses=dict(required=False, type='list'),
                                                                                      prefix_length=dict(required=False, type='str')),
                                                                  redundant_uplink_port_ids=dict(required=False, type='list'),
                                                                  redundant_uplink_port_names=dict(required=False, type='list')),
                                              external_transit_networks=dict(required=False, type='list')),
                         router_type=dict(required=True, type='str'),
                         preferred_edge_cluster_member_index=dict(required=False, type='int'),
                         high_availability_mode=dict(required=False, type='str'),
                         edge_cluster_name=dict(required=False, type='str'),
                         tags=dict(required=False, type='list'),
                         ipv6_profiles=dict(required=False, type='dict',
                                            dad_profile_name=dict(required=False, type='str'),
                                            ndra_profile_name=dict(required=False, type='str')),
                         resource_type=dict(required=False, type='str', choices=['LogicalRouter']),
                         state=dict(required=True, choices=['present', 'absent']))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    logical_router_params = get_logical_router_params(module.params.copy())
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    display_name = module.params['display_name']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)

    logical_router_dict = get_lr_from_display_name(module, manager_url, mgr_username, mgr_password,
                                                   validate_certs, display_name)
    logical_router_id, revision = None, None
    if logical_router_dict:
        logical_router_id = logical_router_dict['id']
        revision = logical_router_dict['_revision']

    if state == 'present':
        body = update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs,
                                     logical_router_params)
        updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, body)
        headers = dict(Accept="application/json")
        headers['Content-Type'] = 'application/json'

        if not updated:
            # add the router
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(json.dumps(body)), id='12345')
            request_data = json.dumps(body)
            try:
                if logical_router_id:
                    module.exit_json(changed=False, id=logical_router_id,
                                     message="Logical router with display_name %s already exist." % module.params['display_name'])
                (rc, resp) = request(manager_url + '/logical-routers', data=request_data, headers=headers,
                                     method='POST', url_username=mgr_username, url_password=mgr_password,
                                     validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to add logical router. Request body [%s]. Error[%s]." % (request_data, to_native(err)))
            time.sleep(5)
            module.exit_json(changed=True, id=resp["id"], body=str(resp),
                             message="Logical router with display_name %s created." % module.params['display_name'])
        else:
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(json.dumps(body)), id=logical_router_id)
            body['_revision'] = revision  # update current revision
            request_data = json.dumps(body)
            id = logical_router_id
            try:
                (rc, resp) = request(manager_url + '/logical-routers/%s' % id, data=request_data, headers=headers,
                                     method='PUT', url_username=mgr_username, url_password=mgr_password,
                                     validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to update logical router with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err)))
            time.sleep(5)
            module.exit_json(changed=True, id=resp["id"], body=str(resp),
                             message="logical router with id %s updated." % id)

    elif state == 'absent':
        # delete the array
        id = logical_router_id
        if id is None:
            module.exit_json(changed=False, msg='No logical router exist with display name %s' % display_name)
        if module.check_mode:
            module.exit_json(changed=True, debug_out=str(json.dumps(logical_router_params)), id=id)
        try:
            (rc, resp) = request(manager_url + "/logical-routers/%s" % id, method='DELETE',
                                 url_username=mgr_username, url_password=mgr_password,
                                 validate_certs=validate_certs)
        except Exception as err:
            module.fail_json(msg="Failed to delete logical router with id %s. Error[%s]." % (id, to_native(err)))
        time.sleep(5)
        module.exit_json(changed=True, object_name=id,
                         message="logical router with id %s deleted." % id)
% id) def checkRedundantUplinkPortIds(existingHaVipConfigs, newHaVipConfigs): if len(existingHaVipConfigs) != len(newHaVipConfigs): return False for i in range(len(existingHaVipConfigs)): firstVal = ordered(existingHaVipConfigs[i]['redundant_uplink_port_ids']) secondVal = ordered(newHaVipConfigs[i]['redundant_uplink_port_ids']) if firstVal != secondVal: return False return True def compareTags(existing_logical_router, new_logical_router): return ordered(existing_logical_router['tags']) == ordered(new_logical_router['tags']) def ordered(obj): if isinstance(obj, dict): return sorted((k, ordered(v)) for k, v in obj.items()) if isinstance(obj, list): return sorted(ordered(x) for x in obj) else: return obj if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_logical_routers_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_logical_routers_facts short_description: List Logical Routers description: Returns information about all logical routers, including the UUID, internal and external transit network addresses, and the router type (TIER0 or TIER1). You can get information for only TIER0 routers or only the TIER1 routers by including the router_type query parameter. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str ''' EXAMPLES = ''' - name: List Logical Routers nsxt_logical_routers_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/logical-routers', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing list of logical routers. 
Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_logical_switches.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_logical_switches short_description: Create a Logical Switch description: Creates a new logical switch. The request must include the transport_zone_id, display_name, and admin_state (UP or DOWN). The replication_mode (MTEP or SOURCE) is required for overlay logical switches, but not for VLAN-based logical switches. A vlan needs to be provided for VLAN-based logical switches version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. 
required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str address_bindings: description: Address bindings for the Logical switch required: false type: array of PacketAddressClassifier admin_state: description: Represents Desired state of the Logical Switch required: true type: str display_name: description: Display name required: true type: str extra_configs: description: 'This property could be used for vendor specific configuration in key value string pairs, the setting in extra_configs will be automatically inheritted by logical ports in the logical switch.' required: false type: array of ExtraConfig hybrid: description: 'If this flag is set to true, then all the logical switch ports attached to this logical switch will behave in a hybrid fashion. The hybrid logical switch port indicates to NSX that the VM intends to operate in underlay mode, but retains the ability to forward egress traffic to the NSX overlay network. This flag can be enabled only for the logical switches in the overlay type transport zone which has host switch mode as STANDARD and also has either CrossCloud or CloudScope tag scopes. Only the NSX public cloud gateway (PCG) uses this flag, other host agents like ESX, KVM and Edge will ignore it. This property cannot be modified once the logical switch is created.' required: false type: boolean ip_pool_name: description: IP pool name required: false type: str lswitch_id: description: LSwitch ID required: false type: str mac_pool_id: description: Mac pool id that associated with a LogicalSwitch. required: false type: str mac_pool_name: description: Mac pool name that associated with a LogicalSwitch. 
required: false type: str replication_mode: description: Replication mode of the Logical Switch required: false type: str description: description: Description of the resource required: False type: str span: description: List of Local Manager IDs the logical switch extends required: False type: list tags: description: Opaque identifiers meaningful to the API user required: False type: list state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true switch_type: description: 'This readonly field indicates purpose of a LogicalSwitch. It is set by manager internally and any user provided values will not be honored. DEFAULT type LogicalSwitches are created for basic L2 connectivity by API users. SERVICE_PLANE type LogicalSwitches are system created service plane LogicalSwitches Service Insertion service.' required: false type: str switching_profiles: description: List of Switching Profile Names and type required: false type: list transport_zone_name: description: Transport Zone Name required: true type: str uplink_teaming_policy_name: description: This name has to be one of the switching uplink teaming policy names listed inside the logical switch's TransportZone. If this field is not specified, the logical switch will not have a teaming policy associated with it and the host switch's default teaming policy will be used. required: false type: str vlan: description: 'This property is dedicated to VLAN based network, to set VLAN of logical network. It is mutually exclusive with ''vlan_trunk_spec''.' required: false type: int vlan_trunk_spec: description: 'This property is used for VLAN trunk specification of logical switch. It''s mutually exclusive with ''vlan''. Also it could be set to do guest VLAN tagging in overlay network.' 
required: false type: dict vlan_ranges: description: Trunk VLAN id ranges required: true type: array of TrunkVlanRange vni: description: 'Only for OVERLAY network. A VNI will be auto-allocated from the default VNI pool if not given; otherwise the given VNI has to be inside the default pool and not used by any other LogicalSwitch.' required: false type: int ''' EXAMPLES = ''' - name: Create logical switch nsxt_logical_switches: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False display_name: "test_lswitch" replication_mode: "SOURCE" admin_state: "UP" transport_zone_name: "TZ1" state: "present" ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request, validate_nsx_mp_support from ansible.module_utils._text import to_native def get_logical_switch_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs', 'lswitch_id'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_logical_switches(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/logical-switches', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing logical switches. 
Error [%s]' % (to_native(err))) return resp def get_lswitch_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): logical_switchs = get_logical_switches(module, manager_url, mgr_username, mgr_password, validate_certs) for logical_switch in logical_switchs['results']: if logical_switch.__contains__('display_name') and logical_switch['display_name'] == display_name: return logical_switch return None def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] module.fail_json(msg='No id existe with display name %s' % display_name) def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, logical_switch_params ): if 'ip_pool_name' in logical_switch_params: logical_switch_params['ip_pool_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/pools/ip-pools", logical_switch_params.pop('ip_pool_name', None)) if 'mac_pool_name' in logical_switch_params and 'mac_pool_id' not in logical_switch_params: logical_switch_params['mac_pool_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/pools/mac-pools", logical_switch_params.pop('mac_pool_name', None)) if 'mac_pool_name' in logical_switch_params and 'mac_pool_id' in logical_switch_params: logical_switch_params.pop('mac_pool_name', None) logical_switch_params['transport_zone_id'] = get_id_from_display_name (module, manager_url, mgr_username, 
mgr_password, validate_certs, "/transport-zones", logical_switch_params.pop('transport_zone_name', None)) switch_profiles = logical_switch_params.pop('switching_profiles', None) switch_profile_ids = [] for switch_profile in switch_profiles or []: profile_obj = {} profile_obj['value'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/switching-profiles", switch_profile['name']) profile_obj['key'] = switch_profile['type'] switch_profile_ids.append(profile_obj) logical_switch_params['switching_profile_ids'] = switch_profile_ids return logical_switch_params def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, logical_switch_with_ids): existing_logical_switch = get_lswitch_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, logical_switch_with_ids['display_name']) if existing_logical_switch is None: return False if existing_logical_switch.__contains__('vlan') and logical_switch_with_ids.__contains__('vlan') and \ existing_logical_switch['vlan'] != logical_switch_with_ids['vlan']: return True if logical_switch_with_ids.__contains__('vlan_trunk_spec') and existing_logical_switch.__contains__('vlan_trunk_spec') and \ existing_logical_switch['vlan_trunk_spec']['vlan_ranges'] != logical_switch_with_ids['vlan_trunk_spec']['vlan_ranges']: return True if existing_logical_switch.__contains__('switching_profile_ids') and logical_switch_with_ids.__contains__('switching_profile_ids') and \ existing_logical_switch['switching_profile_ids'] != logical_switch_with_ids['switching_profile_ids']: return True if existing_logical_switch['admin_state'] != logical_switch_with_ids['admin_state']: return True if existing_logical_switch.__contains__('replication_mode') and logical_switch_with_ids.__contains__('replication_mode') and \ existing_logical_switch['replication_mode'] != logical_switch_with_ids['replication_mode']: return True if existing_logical_switch.__contains__('hybrid') 
and logical_switch_with_ids.__contains__('hybrid') and \ existing_logical_switch['hybrid'] != logical_switch_with_ids['hybrid']: return True return False def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), switch_type=dict(required=False, type='str'), replication_mode=dict(required=False, type='str'), extra_configs=dict(required=False, type='list'), uplink_teaming_policy_name=dict(required=False, type='str'), transport_zone_name=dict(required=True, type='str'), ip_pool_name=dict(required=False, type='str'), vlan=dict(required=False, type='int'), hybrid=dict(required=False, type='bool'), mac_pool_id=dict(required=False, type='str'), mac_pool_name=dict(required=False, type='str'), vni=dict(required=False, type='int'), vlan_trunk_spec=dict(required=False, type='dict', vlan_ranges=dict(required=True, type='list')), admin_state=dict(required=True, type='str'), address_bindings=dict(required=False, type='list'), switching_profiles=dict(required=False, type='list'), lswitch_id=dict(required=False, type='str'), description=dict(required=False, type='str'), span=dict(required=False, type='list'), tags=dict(required=False, type='list'), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) logical_switch_params = get_logical_switch_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) validate_nsx_mp_support(module, manager_url, mgr_username, mgr_password, validate_certs) changed = True lswitch_dict = get_lswitch_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) lswitch_id, revision = None, None if 
lswitch_dict: lswitch_id = lswitch_dict['id'] revision = lswitch_dict['_revision'] if state == 'present': body = update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs, logical_switch_params) updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, body) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' if lswitch_id is None: # add the logical_switch if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(body)), id=lswitch_id) request_data = json.dumps(body) try: if lswitch_id: module.exit_json(changed=False, id=lswitch_id, message="Logical switch with display_name %s already exist."% module.params['display_name']) (rc, resp) = request(manager_url+ '/logical-switches', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add logical switch. Request body [%s]. Error[%s]." % (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="Logical switch with display name %s created." % module.params['display_name']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(body)), id=lswitch_id) body['_revision'] = revision # update current revision request_data = json.dumps(body) id = lswitch_id try: (rc, resp) = request(manager_url+ '/logical-switches/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update logical switch with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="logical switch with lswitch id %s updated." 
% id) elif state == 'absent': # delete the array id = lswitch_id if id is None: module.exit_json(changed=False, msg='No logical switch exist with display name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(logical_switch_params)), id=id) try: (rc, resp) = request(manager_url + "/logical-switches/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete logical switch with id %s. Error[%s]." % (id, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=id, message="Logical switch with zone id %s deleted." % id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_logical_switches_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_logical_switches_facts short_description: List All Logical Switches description: Returns information about all configured logical switches. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str ''' EXAMPLES = ''' - name: List All Logical Switches nsxt_logical_switches_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/logical-switches', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing list of logical ports. 
Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_manager_auto_deployment.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_manager_auto_deployment short_description: 'Deploy and register a cluster node VM' description: "Deploys a cluster node VM as specified by the deployment config. Once the VM is deployed and powered on, it will automatically join the existing cluster." version_added: '2.7' author: 'Rahul Raghuvanshi' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str deployment_requests: description: 'Cluster node VM deployment requests to be deployed by the Manager.' required: true type: 'array of ClusterNodeVMDeploymentRequest' node_id: description: 'Unique node-id of a principal' required: false type: str node_name: description: 'Unique node-name of a principal' required: false type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true ''' EXAMPLES = ''' - name: Deploy and register a cluster node VM nsxt_manager_auto_deployment: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False deployment_requests: - roles: - MANAGER - CONTROLLER form_factor: "MEDIUM" user_settings: cli_password: "Admin!23Admin" root_password: "Admin!23Admin" deployment_config: placement_type: VsphereClusterNodeVMDeploymentConfig vc_id: "7503e86e-c502-46fc-8d91-45a06d314d88" management_network: "network-44" ignore_ssl_verification: True disk_provisioning: "LAZY_ZEROED_THICK" hostname: "manager-2" compute: "domain-c49" storage: "datastore-43" default_gateway_addresses: - 10.112.203.253 management_port_subnets: - ip_addresses: - 10.112.201.25 prefix_length: "19" management_port_ipv6_subnets: - ip_addresses: - 2620:124:6020:1045::1c prefix_length: "64" default_ipv6_gateway_addresses: - 2620:124:6020:1045::253 state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request, get_vc_ip_from_display_name from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vcenter_utils import get_resource_id_from_name from ansible.module_utils._text import to_native FAILED_STATES = ["UNKNOWN_STATE", "VM_DEPLOYMENT_FAILED", "VM_POWER_ON_FAILED", "VM_ONLINE_FAILED", "VM_CLUSTERING_FAILED", 
"VM_DECLUSTER_FAILED", "VM_POWER_OFF_FAILED", "VM_UNDEPLOY_FAILED"] IN_PROGRESS_STATES = ["VM_DEPLOYMENT_QUEUED", "VM_DEPLOYMENT_IN_PROGRESS", "VM_POWER_ON_IN_PROGRESS", "WAITING_TO_REGISTER_VM", "VM_WAITING_TO_CLUSTER", "VM_WAITING_TO_COME_ONLINE", "VM_CLUSTERING_IN_PROGRESS", "WAITING_TO_UNDEPLOY_VM", "VM_DECLUSTER_IN_PROGRESS", "VM_POWER_OFF_IN_PROGRESS", "VM_UNDEPLOY_IN_PROGRESS", "VM_UNDEPLOY_SUCCESSFUL"] SUCCESS_STATES = ["VM_CLUSTERING_SUCCESSFUL", "VM_DECLUSTER_SUCCESSFUL"] def get_node_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs', 'node_id'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_nodes(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/cluster/nodes/deployments', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing controller-manager node. Error [%s]' % (to_native(err))) return resp def check_node_exist(existing_nodes_data, module): new_deployment_requests = module.params['deployment_requests'] for result in existing_nodes_data['results']: for new_deployment_request in new_deployment_requests: if result['deployment_config']['hostname'] == new_deployment_request['deployment_config']['hostname']: return True, result['deployment_config']['hostname'] return False, None def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name, exit_if_not_found=True): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. 
Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] if exit_if_not_found: module.fail_json(msg='No id exist with display name %s' % display_name) def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, node_params ): for deployment_request in node_params['deployment_requests']: vc_name = deployment_request['deployment_config'].pop('vc_name', None) deployment_request['deployment_config']['vc_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/fabric/compute-managers", vc_name) return node_params def wait_till_create(vm_id, module, manager_url, mgr_username, mgr_password, validate_certs): try: while True: (rc, resp) = request(manager_url+ '/cluster/nodes/deployments/%s/status'% vm_id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if any(resp['status'] in progress_status for progress_status in IN_PROGRESS_STATES): time.sleep(10) elif any(resp['status'] in progress_status for progress_status in SUCCESS_STATES): time.sleep(5) return else: module.fail_json(msg= 'Error in controller-manager node deployment: %s'%(str(resp['status']))) except Exception as err: module.fail_json(msg='Error accessing controller-manager node status. 
Error [%s]' % (to_native(err))) def wait_till_delete(vm_id, module, manager_url, mgr_username, mgr_password, validate_certs): try: count = 0; #Wait for maximum 10 minute for vm deletion while True and count < 20: (rc, resp) = request(manager_url+ '/cluster/nodes/deployments/%s/status'% vm_id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if (resp == {}): time.sleep(10) break time.sleep(30) count = count + 1 except Exception as err: time.sleep(5) return def get_node_id_from_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name): ''' Given Name of the auto deployed node, This function retrieves the node id. If not found it fails. ''' try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing vm id for host name %s. Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('deployment_config') and result['deployment_config'].__contains__('hostname') and \ result['deployment_config']['hostname'] == display_name: if result.__contains__('vm_id'): return result['vm_id'] module.fail_json(msg='No auto deployed node exist with display name %s' % display_name) def inject_vcenter_info(module, manager_url, mgr_username, mgr_password, validate_certs, node_params): ''' params: - transport_node_params: These are the transport node parameters passed from playbook file result: - takes the vecenter parameters accepted by playbook and converts it into the form accepted by cluster node deployment api using pyvmomi functions. 
''' for deployment_request in node_params['deployment_requests']: deployment_config = deployment_request['deployment_config'] if deployment_config.__contains__('ignore_ssl_verification'): ignore_ssl_verification = deployment_config['ignore_ssl_verification'] else: ignore_ssl_verification = True if deployment_config.__contains__('vc_username') and deployment_config.__contains__('vc_password'): vc_name = deployment_config['vc_name'] vc_ip = get_vc_ip_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/fabric/compute-managers", vc_name) vc_username = deployment_config.pop('vc_username', None) vc_password = deployment_config.pop('vc_password', None) if deployment_config.__contains__('host'): host = deployment_config.pop('host', None) host_id = get_resource_id_from_name(module, vc_ip, vc_username, vc_password, 'host', host, ignore_ssl_verification) deployment_request['deployment_config']['host_id'] = str(host_id) storage = deployment_config.pop('storage') storage_id = get_resource_id_from_name(module, vc_ip, vc_username, vc_password, 'storage', storage, ignore_ssl_verification) deployment_request['deployment_config']['storage_id'] = str(storage_id) cluster = deployment_config.pop('compute') cluster_id = get_resource_id_from_name(module, vc_ip, vc_username, vc_password, 'cluster', cluster, ignore_ssl_verification) deployment_request['deployment_config']['compute_id'] = str(cluster_id) management_network = deployment_config.pop('management_network') management_network_id = get_resource_id_from_name(module, vc_ip, vc_username, vc_password, 'network', management_network, ignore_ssl_verification) deployment_request['deployment_config']['management_network_id'] = str(management_network_id) if deployment_config.__contains__('host'): deployment_request['deployment_config'].pop('host', None) deployment_request['deployment_config'].pop('cluster', None) deployment_request['deployment_config'].pop('storage', None) 
def main():
    """Ansible entry point: deploy or delete NSX manager/controller cluster node VMs.

    state=present deploys the node(s) described in ``deployment_requests`` and
    blocks until each VM reports as created; state=absent deletes a node
    identified by ``node_id`` or ``node_name`` and blocks until the deployment
    entry is gone.  Exits the process via module.exit_json/fail_json.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(deployment_requests=dict(required=True, type='list'),
                         node_name=dict(required=False, type='str'),
                         node_id=dict(required=False, type='str'),
                         state=dict(required=True, choices=['present', 'absent']))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    # Build the deployment request payload from a copy of the raw module params.
    node_params = get_node_params(module.params.copy())
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    headers = dict(Accept="application/json")
    headers['Content-Type'] = 'application/json'
    # Resolve vCenter object names (host/compute/storage/network) into IDs,
    # mutating node_params in place.
    inject_vcenter_info(module, manager_url, mgr_username, mgr_password,
                        validate_certs, node_params)
    update_params_with_id (module, manager_url, mgr_username, mgr_password,
                           validate_certs, node_params)
    request_data = json.dumps(node_params)
    # Current deployments, used for the idempotency/existence checks below.
    results = get_nodes(module, manager_url, mgr_username, mgr_password,
                        validate_certs)
    is_node_exist, hostname = check_node_exist(results, module)
    if state == 'present':
        # add Manager Controller node
        if is_node_exist:
            # Idempotent no-op: a node with this hostname is already deployed.
            module.exit_json(changed=False,
                             message="Controller-manager node with hostname %s already exist."% hostname)
        if module.check_mode:
            module.exit_json(changed=True, debug_out=str(request_data))
        try:
            (rc, resp) = request(manager_url+ '/cluster/nodes/deployments',
                                 data=request_data, headers=headers, method='POST',
                                 url_username=mgr_username, url_password=mgr_password,
                                 validate_certs=validate_certs, ignore_errors=True)
        except Exception as err:
            module.fail_json(msg="Failed to add controller-manager node. Request body [%s]. Error[%s]." % (request_data, to_native(err)))
        # Block until every requested VM has finished deploying.
        for node in resp['results']:
            wait_till_create(node['vm_id'], module, manager_url, mgr_username,
                             mgr_password, validate_certs)
        time.sleep(5)
        module.exit_json(changed=True, body= str(resp),
                         message="Controller-manager node deployed.")
    elif state == 'absent':
        id = None
        if module.params['node_id']:
            id = module.params['node_id']
        elif module.params['node_name']:
            node_name = module.params['node_name']
        else:
            # fail_json exits the process, so the code below only runs when an
            # identifier was provided.
            module.fail_json(msg="Failed to delete manager node as non of node_id, node_name is provided.")
        if not id:
            # Only node_name was supplied; resolve it to an id from the
            # deployments list.
            id = get_node_id_from_name(module, manager_url, mgr_username,
                                       mgr_password, validate_certs,
                                       '/cluster/nodes/deployments', node_name)
        if is_node_exist: # delete node
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(request_data))
            try:
                (rc, resp) = request(manager_url+ '/cluster/nodes/deployments/%s?action=delete' % id,
                                     headers=headers, method='POST',
                                     url_username=mgr_username, url_password=mgr_password,
                                     validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to delete controller-manager node with id %s. Error[%s]." % (id, to_native(err)))
        else:
            module.fail_json(msg="Controller-manager node with id %s does not exist."
                             % id)
        # Block until the deployment entry disappears from the manager.
        wait_till_delete(id, module, manager_url, mgr_username, mgr_password,
                         validate_certs)
        time.sleep(5)
        module.exit_json(changed=True, id=id,
                         message="Controller-manager node with node id %s deleted." % id)


if __name__ == '__main__':
    main()
def main():
    """Return facts about every cluster node VM auto-deployment attempt."""
    module = AnsibleModule(argument_spec=vmware_argument_spec(),
                           supports_check_mode=True)
    params = module.params
    deployments_url = ('https://{}/api/v1'.format(params['hostname'])
                       + '/cluster/nodes/deployments')
    try:
        rc, resp = request(deployments_url,
                           headers=dict(Accept='application/json'),
                           url_username=params['username'],
                           url_password=params['password'],
                           validate_certs=params['validate_certs'],
                           ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing controllers. Error [%s]' % (to_native(err)))
    # Read-only facts module: never reports a change.
    module.exit_json(changed=False, **resp)


if __name__ == '__main__':
    main()
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_manager_status short_description: Shows status of nsxt manager description: Shows status of nsxt manager version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
def main():
    """Poll the NSX manager until its cluster status is STABLE or a timeout expires.

    ``wait_time`` is the timeout in minutes.  The manager's
    ``/cluster-manager/status`` endpoint is polled roughly every 10 seconds;
    the module exits successfully as soon as ``overall_status`` is ``STABLE``
    and fails once the timeout is exceeded.
    """
    argument_spec = vmware_argument_spec()
    # BUG FIX: wait_time previously had no default, so omitting it made the
    # loop condition below raise TypeError (None * 60).  Default to 30 minutes,
    # matching the original "wait till 30 min" intent.
    argument_spec.update(wait_time=dict(required=False, type='int', default=30))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    changed = False
    wait_time = 10
    timeout_seconds = module.params['wait_time'] * 60
    while wait_time < timeout_seconds:
        # Taken before the request so elapsed time is accounted for even when
        # the request raises (previously current_time could be referenced in
        # the except path of the very iteration that failed to set it).
        current_time = datetime.now()
        try:
            (rc, resp) = request(manager_url + '/cluster-manager/status',
                                 headers=dict(Accept='application/json'),
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
            if "overall_status" in resp and resp["overall_status"] == "STABLE":
                # module.exit_json raises SystemExit, which is not caught by
                # the Exception handler below.
                module.exit_json(changed=changed, msg= " NSX manager is UP")
        except Exception:
            # Manager not reachable (or malformed response) yet; retry below.
            pass
        time_diff = datetime.now() - current_time
        time.sleep(10)
        wait_time = time_diff.seconds + wait_time + 10
    module.fail_json(changed=changed, msg= " Error accessing nsx manager. Timed out")


if __name__ == '__main__':
    main()
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_policy_bfd_profile short_description: Create or Delete a Policy BFD Profile description: Creates or deletes a Policy BFD Profile. Required attributes include id and display_name. version_added: "2.8" author: Gautam Verma extends_documentation_fragment: - vmware.ansible_for_nsxt.vmware_nsxt options: id: description: The id of the BFD Profile. required: true type: str description: description: BFD Profile description. type: str interval: description: - Time interval between heartbeat packets in milliseconds - Should be in the range [50-60000] type: int default: 500 multiple: description: - Declare dead multiple. - Number of times heartbeat packet is missed before BFD declares the neighbor is down. 
class NSXTBFDProfile(NSXTBaseRealizableResource):
    """Policy BFD Profile resource, driven by the base realizable machinery."""

    @staticmethod
    def get_resource_spec():
        # Arguments specific to BFD profiles; common arguments are contributed
        # by the base resource class.
        return dict(
            interval=dict(
                default=500,
                type='int'
            ),
            multiple=dict(
                default=3,
                type='int'
            ),
        )

    @staticmethod
    def get_resource_base_url(baseline_args=None):
        # BFD profiles live at a fixed policy URL; no baseline args are used.
        return BFD_PROFILE_URL


if __name__ == '__main__':
    NSXTBFDProfile().realize()
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_policy_gateway_policy short_description: Update a Gateway Policy description: Updates a Gateway Policy Required attributes include id or display_name version_added: "2.8" author: Gautam Verma extends_documentation_fragment: - vmware.ansible_for_nsxt.vmware_nsxt options: id: description: The id of the Gateway Policy required: false type: str description: description: Gateway Policy description. type: str category: description: Policy Framework for Edge Firewall provides six pre-defined categories - "Emergency", "SystemRules", "SharedPreRules", "LocalGatewayRules", "AutoServiceRules" and "Default", in order of priority of rules. All categories are allowed for Gatetway Policies that belong to 'default' Domain. However, for user created domains, category is restricted to "SharedPreRules" or "LocalGatewayRules" only. Also, the users can add/modify/delete rules from only the "SharedPreRules" and "LocalGatewayRules" categories. If user doesn't specify the category then defaulted to "Rules". System generated category is used by NSX created rules, for example BFD rules. Autoplumbed category used by NSX verticals to autoplumb data path rules. 
Finally, "Default" category is the placeholder default rules with lowest in the order of priority required: false type: str choices: - Emergency - SystemRules - SharedPreRules - LocalGatewayRules - AutoServiceRules - Default default: Default comments: description: Comments for security policy lock/unlock required: false type: str locked: description: Indicates whether a security policy should be locked. If the security policy is locked by a user, then no other user would be able to modify this security policy. Once the user releases the lock, other users can update this security policy required: false type: bool default: false rules: description: Rules that are a part of this GatewayPolicy type: list suboptions: action: description: The action to be applied to all the services type: str choices: - "ALLOW" - "DROP" - "REJECT" description: description: Description of this resource type: str destination_groups: description: Destination group paths type: list required: true destinations_excluded: description: Negation of destination groups If set to true, the rule gets applied on all the groups that are NOT part of the destination groups. If false, the rule applies to the destination groups. type: bool default: false direction: description: Define direction of traffic. type: str choices: - IN - OUT - IN_OUT disabled: description: Flag to disable the rule type: bool default: false display_name: description: Identifier to use when displaying entity in logs or GUI. Defaults to ID if not set type: str id: description: Unique identifier of this resource type: str required: true ip_protocol: description: - IPv4 vs IPv6 packet type - Type of IP packet that should be matched while enforcing the rule. The value is set to IPV4_IPV6 for Layer3 rule if not specified. For Layer2/Ether rule the value must be null. type: str choices: - IPV4 - IPV6 - IPV4_IPV6 logged: description: Flag to enable packet logging. Default is disabled. 
type: bool default: false notes: description: Text for additional notes on changes type: str profiles: description: - Layer 7 service profiles - Holds the list of layer 7 service profile paths. These profiles accept attributes and sub-attributes of various network services (e.g. L4 AppId, encryption algorithm, domain name, etc) as key value pairs type: list scope: description: The list of policy paths where the rule is applied LR/Edge/T0/T1/LRP etc. Note that a given rule can be applied on multiple LRs/LRPs type: list sequence_number: description: Sequence number of the this Rule type: int service_entries: description: - Raw services - In order to specify raw services this can be used, along with services which contains path to services. This can be empty or null type: list elements: dict services: description: Paths of services In order to specify all services, use the constant "ANY". This is case insensitive. If "ANY" is used, it should be the ONLY element in the services array. Error will be thrown if ANY is used in conjunction with other values. type: list required: true source_groups: description: Source group paths type: list required: true sources_excluded: description: Negation of source groups If set to true, the rule gets applied on all the groups that are NOT part of the source groups. If false, the rule applies to the source groups type: bool default: false tag: description: - Tag applied on the rule - User level field which will be printed in CLI and packet logs. type: str tags: description: Opaque identifiers meaningful to the API user type: list elements: dict suboptions: scope: description: Tag scope type: str tag: description: Tag value type: str scheduler_path: description: - Path to the scheduler for time based scheduling - Provides a mechanism to apply the rules in this policy for a specified time duration required: false type: str scope: description: The list of group paths where the rules in this policy will get applied. 
This scope will take precedence over rule level scope. Supported only for security and redirection policies. In case of RedirectionPolicy, it is expected only when the policy is NS and redirecting to service chain. required: false type: list element: str sequence_number: description: - Sequence number to resolve conflicts across Domains - This field is used to resolve conflicts between security policies across domains. In order to change the sequence number of a policy one can fire a POST request on the policy entity with a query parameter action=revise The sequence number field will reflect the value of the computed sequence number upon execution of the above mentioned POST request. For scenarios where the administrator is using a template to update several security policies, the only way to set the sequence number is to explicitly specify the sequence number for each security policy. If no sequence number is specified in the payload, a value of 0 is assigned by default. If there are multiple policies with the same sequence number then their order is not deterministic. If a specific order of policies is desired, then one has to specify unique sequence numbers or use the POST request on the policy entity with a query parameter action=revise to let the framework assign a sequence number required: false type: int stateful: description: - Stateful nature of the entries within this security policy. - Stateful or Stateless nature of security policy is enforced on all rules in this security policy. When it is stateful, the state of the network connects are tracked and a stateful packet inspection is performed. Layer3 security policies can be stateful or stateless. By default, they are stateful. Layer2 security policies can only be stateless. required: false type: bool tcp_strict: description: - Enforce strict tcp handshake before allowing data packets - Ensures that a 3 way TCP handshake is done before the data packets are sent. 
class NSXTGatewayPolicy(NSXTBaseRealizableResource):
    """Policy Gateway Policy resource; reuses the shared security-policy spec."""

    @staticmethod
    def get_resource_spec():
        # Copy the shared spec so the module-level dict is never mutated, then
        # drop the one option that gateway policies do not accept.
        spec = dict(SecurityPolicySpec)
        spec.pop('connectivity_strategy')
        return spec

    @staticmethod
    def get_resource_base_url(baseline_args):
        # Gateway policies are scoped under a policy domain.
        return GATEWAY_POLICY_URL.format(
            baseline_args["domain_id"])

    def update_resource_params(self, nsx_resource_params):
        # domain_id only selects the URL; it is not part of the NSX payload.
        nsx_resource_params.pop('domain_id')


if __name__ == '__main__':
    NSXTGatewayPolicy().realize(baseline_arg_names=["domain_id"])
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_policy_group short_description: Create or Delete a Policy Policy Group description: Creates or deletes a Policy Policy Group. Required attributes include id and display_name. version_added: "2.8" author: Gautam Verma extends_documentation_fragment: - vmware.ansible_for_nsxt.vmware_nsxt options: id: description: The id of the Policy Policy Group. required: false type: str description: description: Policy Group description. type: str domain_id: description: Domain ID. type: str expression: description: - The expression list must follow below criteria - 1. A non-empty expression list, must be of odd size. In a list, with indices starting from 0, all non-conjunction expressions must be at even indices, separated by a conjunction expression at odd indices. - 2. The total of ConditionExpression and NestedExpression in a list should not exceed 5. - 3. The total of IPAddressExpression, MACAddressExpression, external IDs in an ExternalIDExpression and paths in a PathExpression must not exceed 500. - 4. Each expression must be a valid Expression. See the definition of the Expression type for more information. 
class NSXTPolicyGroup(NSXTBaseRealizableResource):
    """Policy Group resource, scoped under a policy domain."""

    @staticmethod
    def get_resource_spec():
        # Arguments specific to policy groups; domain_id and expression are
        # mandatory, the remaining options are not.
        return dict(
            domain_id=dict(
                required=True,
                type='str'
            ),
            expression=dict(
                required=True,
                type='list'
            ),
            extended_expression=dict(
                required=False,
                type='list'
            ),
            group_state=dict(
                required=False,
                type='str'
            ),
        )

    @staticmethod
    def get_resource_base_url(baseline_args):
        # Groups are addressed per domain.
        return POLICY_GROUP_URL.format(
            baseline_args["domain_id"]
        )

    def update_resource_params(self, nsx_resource_params):
        # domain_id only selects the URL; strip it from the NSX payload.
        nsx_resource_params.pop('domain_id')


if __name__ == '__main__':
    NSXTPolicyGroup().realize(baseline_arg_names=["domain_id"])
from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_policy_ip_block short_description: Create or Delete a Policy IP Block description: Creates or deletes a Policy IP Block. Required attributes include id and display_name. version_added: "2.8" author: Gautam Verma extends_documentation_fragment: - vmware.ansible_for_nsxt.vmware_nsxt options: id: description: The id of the Policy IP Block. required: false type: str description: description: IP Block description. type: str cidr: description: - A contiguous IP address space represented by network address and prefix length - Represents a network address and the prefix length which will be associated with a layer-2 broadcast domain. Support only IPv4 CIDR. required: true type: str ''' EXAMPLES = ''' - name: create IP Block nsxt_policy_ip_block: hostname: "10.10.10.10" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: False id: test-ip-blk display_name: test-ip-blk state: "present" cidr: "192.168.0.0/16" ''' RETURN = '''# ''' import json import time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource import NSXTBaseRealizableResource from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import IP_BLOCK_URL from ansible.module_utils._text import to_native class NSXTIpBlock(NSXTBaseRealizableResource): @staticmethod def get_resource_spec(): ip_block_arg_spec = {} ip_block_arg_spec.update( cidr=dict( required=True, type='str' ) ) return ip_block_arg_spec @staticmethod def get_resource_base_url(baseline_args=None): return IP_BLOCK_URL if __name__ == '__main__': ip_block = NSXTIpBlock() ip_block.realize() ================================================ FILE: 
plugins/modules/nsxt_policy_ip_block_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_ip_blocks_facts short_description: Returns list of configured IP address blocks. description: Returns information about configured IP address blocks. Information includes the id, display name, description & CIDR of IP address blocks version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str ''' EXAMPLES = ''' - name: Lists all configured IP address blocks nsxt_ip_block_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/policy/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(f"{manager_url}/infra/ip-pools", headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing list of ip blocks. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_policy_ip_pool.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_policy_ip_pool short_description: Create or Delete a Policy IP Pool description: Creates or deletes a Policy IP Pool. Required attributes include id and display_name. version_added: "2.8" author: Gautam Verma extends_documentation_fragment: - vmware.ansible_for_nsxt.vmware_nsxt options: id: description: The id of the Policy IP Pool. required: false type: str description: description: Resource description. type: str pool_block_subnets: type: list element: dict description: Specify the IP Pool Block Subnets that need to be created, updated, or deleted as a list of dict in this section suboptions: auto_assign_gateway: description: - Indicate whether default gateway is to be reserved from the range - If this property is set to true, the first IP in the range will be reserved for gateway. type: bool default: true description: description: Resource description. type: str display_name: description: - Display name. - If resource ID is not specified, display_name will be used as ID. 
required: false type: str do_wait_till_create: type: bool default: false description: Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager id: description: The id of the Policy IP Pool Block Subnet. required: false type: str ip_block_display_name: description: Same as ip_block_id. Either one must be specified. If both are specified, ip_block_id takes precedence. required: false type: str ip_block_id: description: The ID of the IpAddressBlock from which the subnet is to be created type: str size: description: - Represents the size or number of IP addresses in the subnet - The size parameter is required for subnet creation. It must be specified during creation but cannot be changed later. type: int state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true tags: description: Opaque identifiers meaningful to the API user. type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str pool_static_subnets: type: list element: dict description: Specify the IP Pool Static Subnets that need to be created, updated, or deleted as a list of dict in this section suboptions: allocation_ranges: description: A collection of IPv4 or IPv6 IP Pool Ranges. type: list element: dict suboptions: start: description: The start IP Address of the IP Range. type: str required: true end: description: The end IP Address of the IP Range. type: str required: true cidr: description: Subnet representation is a network address and prefix length type: str required: true description: description: Resource description. type: str display_name: description: - Display name. - If resource ID is not specified, display_name will be used as ID. 
required: false type: str dns_nameservers: description: The collection of upto 3 DNS servers for the subnet. type: list element: str dns_suffix: description: The DNS suffix for the DNS server. type: str do_wait_till_create: type: bool default: false description: Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager gateway_ip: description: The default gateway address on a layer-3 router. type: str id: description: The id of the Policy IP Pool Block Subnet. required: false type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." tags: description: Opaque identifiers meaningful to the API user. type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str ''' EXAMPLES = ''' - name: create IP Pool nsxt_policy_ip_pool: hostname: "10.10.10.10" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: False id: test-ip-pool display_name: test-ip-pool state: "absent" tags: - tag: "a" scope: "b" pool_block_subnets: - id: test-ip-subnet-1 state: present ip_block_id: "test-ip-blk-1" size: 16 - display_name: test-ip-subnet-2 state: present ip_block_id: "test-ip-blk-1" size: 16 - display_name: test-ip-subnet-3 state: present ip_block_id: "test-ip-blk-1" size: 8 pool_static_subnets: - id: test-ip-static-subnet-1 state: present allocation_ranges: - start: '192.116.0.10' end: '192.116.0.20' - start: '192.116.0.30' end: '192.116.0.40' cidr: '192.116.0.0/26' - display_name: test-ip-static-subnet-2 state: present allocation_ranges: - start: '192.116.1.10' end: '192.116.1.20' - start: '192.116.1.30' end: '192.116.1.40' cidr: '192.116.1.0/26' ''' RETURN = '''# ''' import json import time from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource import NSXTBaseRealizableResource from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import ( IP_ADDRESS_POOL_SUBNET_URL, IP_BLOCK_URL, IP_POOL_URL) from ansible.module_utils._text import to_native class NSXTIpPool(NSXTBaseRealizableResource): @staticmethod def get_resource_spec(): ip_pool_arg_spec = {} return ip_pool_arg_spec @staticmethod def get_resource_base_url(baseline_args=None): return IP_POOL_URL def update_parent_info(self, parent_info): parent_info["ip_pool_id"] = self.id class NSXTIpAddressPoolBlockSubnet(NSXTBaseRealizableResource): def get_spec_identifier(self): return (NSXTIpPool.NSXTIpAddressPoolBlockSubnet. get_spec_identifier()) @classmethod def get_spec_identifier(cls): return "pool_block_subnets" @staticmethod def get_resource_spec(): ip_addr_pool_blk_subnet_arg_spec = {} ip_addr_pool_blk_subnet_arg_spec.update( ip_block_id=dict( required=False, type='str' ), ip_block_display_name=dict( required=False, type='str' ), auto_assign_gateway=dict( required=False, type='bool' ), size=dict( required=True, type='int' ), start_ip=dict( required=False, type='str' ), ) return ip_addr_pool_blk_subnet_arg_spec @staticmethod def get_resource_base_url(parent_info): return IP_ADDRESS_POOL_SUBNET_URL.format( parent_info["ip_pool_id"] ) def update_resource_params(self, nsx_resource_params): # ip_block is a required attr ip_block_id = self.get_id_using_attr_name_else_fail( "ip_block", nsx_resource_params, IP_BLOCK_URL, "IP Block") nsx_resource_params["ip_block_path"] = ( IP_BLOCK_URL + "/" + ip_block_id) nsx_resource_params["resource_type"] = "IpAddressPoolBlockSubnet" class NSXTIpAddressPoolStaticSubnet(NSXTBaseRealizableResource): def get_spec_identifier(self): return (NSXTIpPool.NSXTIpAddressPoolStaticSubnet. 
get_spec_identifier()) @classmethod def get_spec_identifier(cls): return "pool_static_subnets" @staticmethod def get_resource_spec(): ip_addr_pool_static_subnet_arg_spec = {} ip_addr_pool_static_subnet_arg_spec.update( auto_assign_gateway=dict( required=False, type='bool' ), allocation_ranges=dict( required=True, elements='dict', type='list', options=dict( start=dict( required=True, type='str' ), end=dict( required=True, type='str' ), ) ), cidr=dict( required=True, type='str' ), dns_nameservers=dict( required=False, elements='str', type='list' ), dns_suffix=dict( required=False, type='str' ), gateway_ip=dict( required=False, type='str' ), ) return ip_addr_pool_static_subnet_arg_spec @staticmethod def get_resource_base_url(parent_info): return IP_ADDRESS_POOL_SUBNET_URL.format( parent_info["ip_pool_id"] ) def update_resource_params(self, nsx_resource_params): nsx_resource_params["resource_type"] = "IpAddressPoolStaticSubnet" if __name__ == '__main__': ip_pool = NSXTIpPool() ip_pool.realize() ================================================ FILE: plugins/modules/nsxt_policy_ip_pool_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_ip_pools_facts short_description: List IP Pools description: Returns information about the configured IP address pools. Information includes the display name and description of the pool and the details of each of the subnets in the pool, including the DNS servers, allocation ranges, gateway, and CIDR subnet address. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str ''' EXAMPLES = ''' - name: List IP Pools nsxt_ip_pools_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/policy/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(f"{manager_url}/infra/ip-blocks", headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing list of ip pools. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_policy_l2_bridge_ep_profile.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_policy_l2_bridge_ep_profile short_description: Create or Delete a Policy L2 Bridge Endpoint Profile description: Creates or deletes a Policy L2 Bridge Endpoint Profile Required attributes include id and display_name. version_added: "2.9" author: Gautam Verma extends_documentation_fragment: - vmware.ansible_for_nsxt.vmware_nsxt options: id: description: The id of the Policy L2 Bridge Endpoint Profile required: false type: str description: description: Resource description. type: str edge_nodes_info: description: - List of dicts that comprise of information to form policy paths to edge nodes. Edge allocation for L2 bridging - Minimim 1 and Maximum 2 list elements type: list element: dict suboptions: site_id: description: site_id where edge node is located default: default type: str enforcementpoint_id: description: enforcementpoint_id where edge node is located default: default type: str edge_cluster_id: description: edge_cluster_id where edge node is located type: str edge_cluster_display_name: description: - display name of the edge cluster - either this or edge_cluster_id must be specified. 
If both are specified, edge_cluster_id takes precedence type: str edge_node_id: description: ID of the edge node type: str edge_node_display_name: description: - Display name of the edge node. - either this or edge_node_id must be specified. If both are specified, edge_node_id takes precedence type: str failover_mode: description: Failover mode for the edge bridge cluster type: str default: PREEMPTIVE choices: - PREEMPTIVE - NON_PREEMPTIVE ha_mode: description: High avaialability mode can be active-active or active-standby. High availability mode cannot be modified after realization type: str default: ACTIVE_STANDBY choices: - ACTIVE_STANDBY ''' EXAMPLES = ''' - name: create L2 Bridge Endpoint Profile nsxt_policy_l2_bridge_ep_profile: hostname: "10.10.10.10" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: False id: test-ep-profile display_name: test-ep-profile state: present edge_nodes_info: - edge_cluster_display_name: edge-cluster-1 edge_node_id: 123471da-3823-11ea-9170-000c291a8262 failover_mode: PREEMPTIVE ha_mode: ACTIVE_STANDBY tags: - tag: "my-tag" scope: "my-scope" ''' RETURN = '''# ''' import json import time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource import NSXTBaseRealizableResource from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import ( EDGE_CLUSTER_URL, EDGE_NODE_URL, L2_BRIDGE_EP_PROFILE_URL) from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.policy_resource_specs.l2_bridge_ep_profile import SPEC as L2BridgeEpProfileSpec from ansible.module_utils._text import to_native class NSXTL2BridgeEpProfile(NSXTBaseRealizableResource): @staticmethod def get_resource_spec(): return L2BridgeEpProfileSpec @staticmethod def get_resource_base_url(baseline_args=None): return L2_BRIDGE_EP_PROFILE_URL.format( baseline_args['site_id'], 
baseline_args['enforcementpoint_id']) def update_resource_params(self, nsx_resource_params): nsx_resource_params.pop('site_id') nsx_resource_params.pop('enforcementpoint_id') edge_nodes_info = nsx_resource_params.pop( "edge_nodes_info") nsx_resource_params["edge_paths"] = [] for edge_node_info in edge_nodes_info: site_id = edge_node_info['site_id'] enforcementpoint_id = edge_node_info['enforcementpoint_id'] edge_cluster_base_url = ( EDGE_CLUSTER_URL.format(site_id, enforcementpoint_id)) edge_cluster_id = self.get_id_using_attr_name_else_fail( "edge_cluster", edge_node_info, edge_cluster_base_url, "Edge Cluster") edge_node_base_url = EDGE_NODE_URL.format( site_id, enforcementpoint_id, edge_cluster_id) edge_node_id = self.get_id_using_attr_name_else_fail( "edge_node", edge_node_info, edge_node_base_url, "Edge Node") nsx_resource_params["edge_paths"].append( edge_node_base_url + "/" + edge_node_id) if __name__ == '__main__': l2_bridge_ep_profile = NSXTL2BridgeEpProfile() l2_bridge_ep_profile.realize(baseline_arg_names=[ 'site_id', 'enforcementpoint_id']) ================================================ FILE: plugins/modules/nsxt_policy_security_policy.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_policy_security_policy short_description: Create or Delete a Policy Security Policy description: Creates or deletes a Policy Security Policy. Required attributes include id and display_name. version_added: "2.8" author: Gautam Verma extends_documentation_fragment: - vmware.ansible_for_nsxt.vmware_nsxt options: id: description: The id of the Policy Security Policy. required: false type: str description: description: Security Policy description. type: str domain_id: description: The domain id where the Security Policy is realized. type: str required: true category: description: - A way to classify a security policy, if needed. - Distributed Firewall - Policy framework provides five pre-defined categories for classifying a security policy. They are "Ethernet",Emergency", "Infrastructure", "Environment" and "Application". There is a pre-determined order in which the policy framework manages the priority of these security policies. Ethernet category is for supporting layer 2 firewall rules. The other four categories are applicable for layer 3 rules. Amongst them, the Emergency category has the highest priority followed by Infrastructure, Environment and then Application rules. 
Administrator can choose to categorize a security policy into the above categories or can choose to leave it empty. If empty it will have the least precedence w.r.t the above four categories. - Edge Firewall - Policy Framework for Edge Firewall provides six pre-defined categories "Emergency", "SystemRules", "SharedPreRules", "LocalGatewayRules", "AutoServiceRules" and "Default", in order of priority of rules. All categories are allowed for Gatetway Policies that belong to 'default' Domain. However, for user created domains, category is restricted to "SharedPreRules" or "LocalGatewayRules" only. Also, the users can add/modify/delete rules from only the "SharedPreRules" and "LocalGatewayRules" categories. If user doesn't specify the category then defaulted to "Rules". System generated category is used by NSX created rules, for example BFD rules. Autoplumbed category used by NSX verticals to autoplumb data path rules. Finally, "Default" category is the placeholder default rules with lowest in the order of priority. type: str comments: type: str description: SecurityPolicy lock/unlock comments connectivity_strategy: type: str description: - Connectivity strategy applicable for this SecurityPolicy - This field indicates the default connectivity policy for the security policy. Based on the connectivitiy strategy, a default rule for this security policy will be created. An appropriate action will be set on the rule based on the value of the connectivity strategy. If NONE is selected or no connectivity strategy is specified, then no default rule for the security policy gets created. The default rule that gets created will be a any-any rule and applied to entities specified in the scope of the security policy. Specifying the connectivity_strategy without specifying the scope is not allowed. The scope has to be a Group and one cannot specify IPAddress directly in the group that is used as scope. 
This default rule is only applicable for the Layer3 security policies - WHITELIST - Adds a default drop rule. Administrator can then use "allow" rules (aka whitelist) to allow traffic between groups - BLACKLIST - Adds a default allow rule. Admin can then use "drop" rules (aka blacklist) to block traffic between groups - WHITELIST_ENABLE_LOGGING - Whitelising with logging enabled - BLACKLIST_ENABLE_LOGGING - Blacklisting with logging enabled - NONE - No default rule is created locked: type: bool description: - Lock a security policy - Indicates whether a security policy should be locked. If the security policy is locked by a user, then no other user would be able to modify this security policy. Once the user releases the lock, other users can update this security policy. scheduler_path: type: str description: - Path to the scheduler for time based scheduling - Provides a mechanism to apply the rules in this policy for a specified time duration. scope: description: The list of group paths where the rules in this policy will get applied. This scope will take precedence over rule level scope. Supported only for security policies. type: list sequence_number: description: Sequence number to resolve conflicts across Domains type: int stateful: type: bool description: - Stateful nature of the entries within this security policy. - Stateful or Stateless nature of security policy is enforced on all rules in this security policy. When it is stateful, the state of the network connects are tracked and a stateful packet inspection is performed. - Layer3 security policies can be stateful or stateless. By default, they are stateful. - Layer2 security policies can only be stateless. 
rules: description: Rules that are a part of this SecurityPolicy type: list suboptions: action: description: The action to be applied to all the services type: str choices: - "ALLOW" - "DROP" - "REJECT" description: description: Description of this resource type: str destination_groups: description: Destination group paths type: list required: true destinations_excluded: description: Negation of destination groups If set to true, the rule gets applied on all the groups that are NOT part of the destination groups. If false, the rule applies to the destination groups. type: bool default: false direction: description: Define direction of traffic. type: str choices: - IN - OUT - IN_OUT disabled: description: Flag to disable the rule type: bool default: false display_name: description: Identifier to use when displaying entity in logs or GUI. Defaults to ID if not set type: str id: description: Unique identifier of this resource type: str required: true ip_protocol: description: - IPv4 vs IPv6 packet type - Type of IP packet that should be matched while enforcing the rule. The value is set to IPV4_IPV6 for Layer3 rule if not specified. For Layer2/Ether rule the value must be null. type: str choices: - IPV4 - IPV6 - IPV4_IPV6 logged: description: Flag to enable packet logging. Default is disabled. type: bool default: false notes: description: Text for additional notes on changes type: str profiles: description: - Layer 7 service profiles - Holds the list of layer 7 service profile paths. These profiles accept attributes and sub-attributes of various network services (e.g. L4 AppId, encryption algorithm, domain name, etc) as key value pairs type: list scope: description: The list of policy paths where the rule is applied LR/Edge/T0/T1/LRP etc. 
Note that a given rule can be applied on multiple LRs/LRPs type: list sequence_number: description: Sequence number of the this Rule type: int service_entries: description: - Raw services - In order to specify raw services this can be used, along with services which contains path to services. This can be empty or null type: list elements: dict services: description: Paths of services In order to specify all services, use the constant "ANY". This is case insensitive. If "ANY" is used, it should be the ONLY element in the services array. Error will be thrown if ANY is used in conjunction with other values. type: list required: true source_groups: description: Source group paths type: list required: true sources_excluded: description: Negation of source groups If set to true, the rule gets applied on all the groups that are NOT part of the source groups. If false, the rule applies to the source groups type: bool default: false tag: description: - Tag applied on the rule - User level field which will be printed in CLI and packet logs. type: str tags: description: Opaque identifiers meaningful to the API user type: list elements: dict suboptions: scope: description: Tag scope type: str tag: description: Tag value type: str tcp_strict: type: bool description: - Enforce strict tcp handshake before allowing data packets - Ensures that a 3 way TCP handshake is done before the data packets are sent. 
- tcp_strict=true is supported only for stateful security policies ''' EXAMPLES = ''' - name: create Security Policy nsxt_policy_security_policy: hostname: "10.10.10.10" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: False id: test-sec-pol display_name: test-sec-pol state: "present" domain_id: "default" locked: True rules: - action: "ALLOW" description: "example-rule" sequence_number: 1 display_name: "test-example-rule" id: "test-example-rule" source_groups: ["/infra/domains/vmc/groups/dbgroup"] destination_groups: ["/infra/domains/vmc/groups/appgroup"] services: ["/infra/services/HTTP", "/infra/services/CIM-HTTP"] tag: my-tag tags: - scope: scope-1 tag: tag-1 logged: True notes: dummy-notes ip_protocol: IPV4_IPV6 scope: my-scope profiles: "encryption algorithm" ''' RETURN = '''# ''' import json import time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource import NSXTBaseRealizableResource from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import SECURITY_POLICY_URL from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.policy_resource_specs.security_policy import SPEC as SecurityPolicySpec from ansible.module_utils._text import to_native class NSXTSecurityPolicy(NSXTBaseRealizableResource): @staticmethod def get_resource_spec(): return SecurityPolicySpec @staticmethod def get_resource_base_url(baseline_args): return SECURITY_POLICY_URL.format( baseline_args["domain_id"]) def update_resource_params(self, nsx_resource_params): nsx_resource_params.pop('domain_id') if __name__ == '__main__': sec_policy = NSXTSecurityPolicy() sec_policy.realize(baseline_arg_names=["domain_id"]) ================================================ FILE: plugins/modules/nsxt_policy_segment.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # 
Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_policy_segment short_description: Create or Delete a Policy Segment description: Creates or deletes a Policy Segment. Required attributes include id and display_name. If the specified TransportZone is of VLAN type, a vlan_id is also required. version_added: "2.8" author: Gautam Verma options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. type: str password: description: - The password to authenticate with the NSX manager. 
- Must be specified if username is specified type: str ca_path: description: Path to the CA bundle to be used to verify host's SSL certificate type: str nsx_cert_path: description: Path to the certificate created for the Principal Identity using which the CRUD operations should be performed type: str nsx_key_path: description: - Path to the certificate key created for the Principal Identity using which the CRUD operations should be performed - Must be specified if nsx_cert_path is specified type: str request_headers: description: HTTP request headers to be sent to the host while making any request type: dict display_name: description: - Display name. - If resource ID is not specified, display_name will be used as ID. required: false type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true validate_certs: description: Enable server certificate verification. type: bool default: False tags: description: Opaque identifiers meaningful to the API user. type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str create_or_update_subresource_first: type: bool default: false description: - Can be used to create subresources first. - Can be specified for each subresource. delete_subresource_first: type: bool default: true description: - Can be used to delete subresources first. - Can be specified for each subresource. achieve_subresource_state_if_del_parent: type: bool default: false description: - Can be used to achieve the state of subresources even if the parent(base) resource's state is absent. - Can be specified for each subresource. do_wait_till_create: type: bool default: false description: - Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager. 
- Can be specified for each subresource. id: description: The id of the Policy Segment. required: false type: str description: description: Segment description. type: str address_bindings: description: Address bindings for the Segment type: list elements: dict suboptions: ip_address: description: IP Address for port binding type: str mac_address: description: Mac address for port binding type: str vlan_id: description: VLAN ID for port binding type: int admin_state: description: Represents Desired state of the Segment type: str choices: - UP - DOWN default: UP advanced_config: description: Advanced configuration for Segment. type: dict suboptions: address_pool_display_name: description: - IP address pool display name - Either this or address_pool_id must be specified. If both are specified, address_pool_id takes precedence type: str address_pool_id: description: - IP address pool ID - Either this or address_pool_display_name must be specified. If both are specified, address_pool_id takes precedence type: str connectivity: description: Connectivity configuration to manually connect (ON) or disconnect (OFF) a logical entity from network topology. Only valid for Tier1 Segment type: str hybrid: description: - Flag to identify a hybrid logical switch - When set to true, all the ports created on this segment will behave in a hybrid fashion. The hybrid port indicates to NSX that the VM intends to operate in underlay mode, but retains the ability to forward egress traffic to the NSX overlay network. This property is only applicable for segment created with transport zone type OVERLAY_STANDARD. This property cannot be modified after segment is created. type: bool local_egress: description: - Flag to enable local egress - This property is used to enable proximity routing with local egress. When set to true, logical router interface (downlink) connecting Segment to Tier0/Tier1 gateway is configured with prefix-length 32. 
type: bool local_egress_routing_policies: description: An ordered list of routing policies to forward traffic to the next hop. type: list elements: dict suboptions: nexthop_address: required: true description: Next hop address for proximity routing type: str prefix_list_paths: required: true description: - Policy path to prefix lists - max 1 element - The destination address of traffic matching a prefix-list is forwarded to the nexthop_address. Traffic matching a prefix list with Action DENY will be dropped. Individual prefix-lists specified could have different actions. type: list elements: str multicast: description: - Enable multicast on the downlink - Enable multicast for a segment. Only applicable for segments connected to Tier0 gateway. type: bool uplink_teaming_policy_name: description: - Uplink Teaming Policy Name - The name of the switching uplink teaming policy for the Segment. This name corresponds to one of the switching uplink teaming policy names listed in TransportZone associated with the Segment. When this property is not specified, the segment will not have a teaming policy associated with it and the host switch's default teaming policy will be used by MP. type: str bridge_profiles: description: Bridge Profile Configuration type: list elements: dict suboptions: bridge_profile_path: description: - Policy path to L2 Bridge profile - Same bridge profile can be configured on different segments. Each bridge profile on a segment must unique. type: str required: true uplink_teaming_policy_name: description: - Uplink Teaming Policy Name - The name of the switching uplink teaming policy for the bridge endpoint. This name corresponds to one of the switching uplink teaming policy names listed in the transport zone. When this property is not specified, the teaming policy is assigned by MP. type: str vlan_ids: description: VLAN specification for bridge endpoint. Either VLAN ID or VLAN ranges can be specified. Not both. 
type: str vlan_transport_zone_path: description: - Policy path to VLAN Transport Zone - VLAN transport zone should belong to the enforcment-point as the transport zone specified in the segment. type: str required: true connectivity_path: description: Policy path to the connecting Tier-0 or Tier-1. Valid only for segments created under Infra type: str dhcp_config_path: description: - Policy path to DHCP configuration - Policy path to DHCP server or relay configuration to use for all IPv4 & IPv6 subnets configured on this segment. type: str extra_configs: description: - Extra configs on Segment - This property could be used for vendor specific configuration in key value string pairs, the setting in extra_configs will be automatically inheritted by segment ports in the Segment. type: list elements: dict suboptions: config_pair: description: Key value pair in string for the configuration type: dict required: true suboptions: key: description: Key type: str required: true value: description: Value type: str required: true l2_extension: description: Configuration for extending Segment through L2 VPN type: dict suboptions: l2vpn_paths: description: Policy paths corresponding to the associated L2 VPN sessions type: list elements: str local_egress: description: Local Egress type: dict suboptions: optimized_ips: description: Gateway IP for Local Egress. Local egress is enabled only when this list is not empty type: list elements: str tunnel_id: description: Tunnel ID type: int mac_pool_id: description: Allocation mac pool associated with the Segment type: str metadata_proxy_paths: description: Metadata Proxy Configuration Paths type: list elements: str overlay_id: description: - Overlay connectivity ID for this Segment - Used for overlay connectivity of segments. The overlay_id should be allocated from the pool as definied by enforcement-point. 
If not provided, it is auto-allocated from the default pool on the enforcement-point type: int replication_mode: description: Replication mode of the Segment type: str default: MTEP choices: - MTEP - SOURCE tier0_id: description: The Uplink of the Policy Segment. Mutually exclusive with tier_1_id. type: str tier0_display_name: description: Same as tier_0_id. Either one can be specified. If both are specified, tier_0_id takes precedence. type: str tier1_id: description: The Uplink of the Policy Segment. Mutually exclusive with tier_0_id but takes precedence. type: str tier1_display_name: description: Same as tier_1_id. Either one can be specified. If both are specified, tier_1_id takes precedence. type: str domain_name: description: Domain name associated with the Policy Segment. type: str transport_zone_id: description: The TZ associated with the Policy Segment. type: str transport_zone_display_name: description: Same as transport_zone_id. Either one can be specified. If both are specified, transport_zone_id takes precedence. type: str enforcementpoint_id: description: The EnforcementPoint ID where the TZ is located. Required if transport_zone_id is specified. default: default type: str site_id: description: The site ID where the EnforcementPoint is located. Required if transport_zone_id is specified. default: default type: str vlan_ids: description: VLAN ids for a VLAN backed Segment. Can be a VLAN id or a range of VLAN ids specified with '-' in between. type: list subnets: description: Subnets that belong to this Policy Segment. type: dict suboptions: dhcp_config: description: Additional DHCP configuration for current subnet type: dict suboptions: dns_servers: description: IP address of DNS servers for subnet. DNS server IP address must belong to the same address family as segment gateway_address property type: list lease_time: description: - DHCP lease time in seconds. 
When specified, this property overwrites lease time configured DHCP server config - Minimum: 60 - Maximum: 4294967295 - Default: "86400" type: int resource_type: description: Resource type choices: - SegmentDhcpV4Config - SegmentDhcpV6Config type: str server_address: description: IP address of the DHCP server in CIDR format. The server_address is mandatory in case this segment has provided a dhcp_config_path and it represents a DHCP server config. If this SegmentDhcpConfig is a SegmentDhcpV4Config, the address must be an IPv4 address. If this is a SegmentDhcpV6Config, the address must be an IPv6 address. This address must not overlap the ip-ranges of the subnet, or the gateway address of the subnet, or the DHCP static-binding addresses of this segment type: str options: description: - Property of SegmentDhcpV4Config - IPv4 DHCP options for segment subnet type: dict suboptions: option121: description: DHCP option 121 to define classless static routes type: dict suboptions: static_routes: description: Classless static route of DHCP option 121 type: list elements: dict suboptions: network: description: Destination network in CIDR format type: str required: true next_hop: description: IP address of next hop of the route type: str required: true others: description: Other DHCP options To define DHCP options other than option 121 in generic format. Please note, only the following options can be defined in generic format. 
Those other options will be accepted without validation but will not take effect -------------------------- Code Name -------------------------- 2 Time Offset 6 Domain Name Server 13 Boot File Size 19 Forward On/Off 26 MTU Interface 28 Broadcast Address 35 ARP Timeout 40 NIS Domain 41 NIS Servers 42 NTP Servers 44 NETBIOS Name Srv 45 NETBIOS Dist Srv 46 NETBIOS Node Type 47 NETBIOS Scope 58 Renewal Time 59 Rebinding Time 64 NIS+-Domain-Name 65 NIS+-Server-Addr 66 TFTP Server-Name (used by PXE) 67 Bootfile-Name (used by PXE) 93 PXE: Client system architecture 94 PXE: Client NDI 97 PXE: UUID/UNDI 117 Name Service Search 119 Domain Search 150 TFTP server address (used by PXE) 175 Etherboot 209 PXE Configuration File 210 PXE Path Prefix 211 PXE Reboot Time type: list elements: dict suboptions: code: description: DHCP option code, [0-255] type: int required: true values: description: DHCP option value type: list required: true domain_names: description: - Property of SegmentDhcpV6Config - Domain names for subnet type: list excluded_ranges: description: - Property of SegmentDhcpV6Config - Excluded IPv6 addresses to define dynamic ip allocation ranges type: list preferred_time: - Property of SegmentDhcpV6Config - The length of time that a valid address is preferred. When the preferred lifetime expires, the address becomes deprecated - Minimum: 60 - Maximum: 4294967295 type: int sntp_servers: description: - Property of SegmentDhcpV6Config - IPv6 address of SNTP servers for subnet type: list dhcp_ranges: description: DHCP address ranges for dynamic IP allocation. DHCP address ranges are used for dynamic IP allocation. Supports address range and CIDR formats. First valid host address from the first value is assigned to DHCP server IP address. Existing values cannot be deleted or modified, but additional DHCP ranges can be added. Formats, e.g. 10.12.2.64/26, 10.12.2.2-10.12.2.50 type: list gateway_address: description: Gateway IP address. 
Gateway IP address in CIDR format for both IPv4 and IPv6. required: True type: str segment_ports: type: list description: - Add the Segment Ports to be create, updated, or deleted in this section element: dict suboptions: address_bindings: description: Static address binding used for the port. type: list elements: dict suboptions: ip_address: description: IP Address for port binding. type: str mac_address: description: Mac address for port binding. type: str vlan_id: description: VLAN ID for port binding. type: str attachment: description: VIF attachment. type: dict suboptions: allocate_addresses: description: Indicate how IP will be allocated for the port. type: str choices: - IP_POOL - MAC_POOL - BOTH - NONE app_id: description: ID used to identify/look up a child attachment behind a parent attachment. type: str context_id: description: Parent VIF ID if type is CHILD, Transport node ID if type is INDEPENDENT. type: str id: description: VIF UUID on NSX Manager. type: str traffic_tag: description: - VLAN ID - Not valid when type is INDEPENDENT, mainly used to identify traffic from different ports in container use case type: int type: description: Type of port attachment. type: str choices: - PARENT - CHILD - INDEPENDENT display_name: description: - Segment Port display name. - Either this or id must be specified. If both are specified, id takes precedence. required: false type: str description: description: - Segment description. type: str do_wait_till_create: type: bool default: false description: Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager extra_configs: description: - Extra configs on segment port - This property could be used for vendor specific configuration in key value string pairs. Segment port setting will override segment setting if the same key was set on both segment and segment port. 
type: list element: dict suboptions: config_pair: description: Key value pair in string for the configuration type: dict required: true suboptions: key: description: Key type: str required: true value: description: Value type: str required: true id: description: The id of the Policy Segment Port. required: false type: str ignored_address_bindings: description: - Address bindings to be ignored by IP Discovery module IP Discovery module uses various mechanisms to discover address bindings being used on each segment port. If a user would like to ignore any specific discovered address bindings or prevent the discovery of a particular set of discovered bindings, then those address bindings can be provided here. Currently IP range in CIDR format is not supported. type: dict suboptions: ip_address: description: IP Address for port binding. type: str mac_address: description: Mac address for port binding. type: str vlan_id: description: VLAN ID for port binding. type: str init_state: description: - Initial state of this logical ports - Set initial state when a new logical port is created. 'UNBLOCKED_VLAN' means new port will be unblocked on traffic in creation, also VLAN will be set with corresponding logical switch setting. This port setting can only be configured at port creation, and cannot be modified. type: str choices: - UNBLOCKED_VLAN default: UNBLOCKED_VLAN state: choices: - present - absent description: - State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource - Required if I(id != null) required: true tags: description: Opaque identifiers meaningful to the API user. type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. 
        required: true
        type: str
'''

EXAMPLES = '''
- name: create Segment
  nsxt_policy_segment:
    hostname: "10.10.10.10"
    nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt
    nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key
    validate_certs: False
    display_name: test-seg-4
    state: present
    domain_name: dn1
    transport_zone_display_name: "1-transportzone-730"
    replication_mode: "SOURCE"
    address_bindings:
      - ip_address: "10.1.2.11"
    advanced_config:
      address_pool_display_name: small-2-pool
      connectivity: "OFF"
      hybrid: False
      local_egress: True
    admin_state: UP
    connectivity_path: "/infra/tier-1s/d082bc25-a9b2-4d13-afe5-d3cecad4b854"
    subnets:
      - gateway_address: "40.1.1.1/16"
    segment_ports:
      - display_name: test-sp-1
        state: present
        tags:
          - scope: "scope-1"
            tag: "tag-2"
        extra_configs:
          - config_pair:
              key: key
              value: value
        ignored_address_bindings:
          - ip_address: "10.1.2.122"
      - display_name: test-sp-2
        state: present
      - display_name: test-sp-3
        state: present
'''

RETURN = '''# '''

import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource import NSXTBaseRealizableResource
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import (
    SEGMENT_PORT_URL, SEGMENT_URL, TIER_0_URL, TIER_1_URL,
    TRANSPORT_ZONE_URL, IP_POOL_URL)
from ansible.module_utils._text import to_native


class NSXTSegment(NSXTBaseRealizableResource):
    """Realize an NSX-T Policy Segment (plus nested Segment Ports).

    CRUD mechanics come from NSXTBaseRealizableResource; this class supplies
    the Segment argument spec and translates user-friendly identifiers
    (tier0/tier1, transport zone, IP pool) into policy paths.
    """

    @staticmethod
    def get_resource_spec():
        # Ansible argument spec for the Segment resource itself.
        # Identifier-style attributes (tier0_id/display_name, transport
        # zone, address pool) are converted into policy paths by
        # update_resource_params() before the payload is sent to NSX.
        segment_arg_spec = {}
        segment_arg_spec.update(
            address_bindings=dict(
                required=False,
                type='list',
                elements='dict',
                options=dict(
                    ip_address=dict(
                        required=False,
                        type='str'
                    ),
                    mac_address=dict(
                        required=False,
                        type='str'
                    ),
                    vlan_id=dict(
                        required=False,
                        type='int'
                    )
                )
            ),
            admin_state=dict(
                type='str',
                choices=['UP', 'DOWN'],
                default='UP'
            ),
            advanced_config=dict(
                required=False,
                type='dict',
                options=dict(
                    address_pool_id=dict(
                        required=False,
                        type='str'
                    ),
                    address_pool_display_name=dict(
                        required=False,
                        type='str'
                    ),
                    connectivity=dict(
                        default="ON",
                        type='str',
                        choices=["ON", "OFF"],
                    ),
                    hybrid=dict(
                        required=False,
                        type='bool',
                        default=False
                    ),
                    local_egress=dict(
                        required=False,
                        type='bool',
                        default=False
                    ),
                    local_egress_routing_policies=dict(
                        required=False,
                        type='list',
                        elements='dict',
                        options=dict(
                            nexthop_address=dict(
                                required=True,
                                type='str'
                            ),
                            prefix_list_paths=dict(
                                required=True,
                                type='list',
                                elements='str'
                            ),
                        )
                    ),
                    multicast=dict(
                        required=False,
                        type='bool'
                    ),
                    uplink_teaming_policy_name=dict(
                        required=False,
                        type='str'
                    ),
                )
            ),
            bridge_profiles=dict(
                type='list',
                elements='dict',
                options=dict(
                    bridge_profile_path=dict(
                        type='str',
                        required=True
                    ),
                    uplink_teaming_policy_name=dict(
                        type='str'
                    ),
                    vlan_ids=dict(
                        type='list',
                        elements='str'
                    ),
                    vlan_transport_zone_path=dict(
                        type='str',
                        required=True
                    ),
                )
            ),
            connectivity_path=dict(
                type='str'
            ),
            dhcp_config_path=dict(
                type='str'
            ),
            domain_name=dict(
                required=False,
                type='str'
            ),
            enforcementpoint_id=dict(
                required=False,
                type='str',
            ),
            extra_configs=dict(
                type='list',
                elements='dict',
                options=dict(
                    config_pair=dict(
                        type='dict',
                        required=True,
                        options=dict(
                            key=dict(
                                type='str',
                                required=True
                            ),
                            value=dict(
                                type='str',
                                required=True
                            )
                        )
                    ),
                )
            ),
            l2_extension=dict(
                type='dict',
                options=dict(
                    # NOTE(review): l2vpn_path does not appear in
                    # DOCUMENTATION yet is required=True, so any use of
                    # l2_extension must supply it even though l2vpn_paths
                    # is the documented field -- confirm against the NSX
                    # Policy API (l2vpn_path is the deprecated form).
                    l2vpn_path=dict(
                        type='str',
                        required=True
                    ),
                    l2vpn_paths=dict(
                        type='list',
                        elements='str'
                    ),
                    local_egress=dict(
                        type='dict',
                        options=dict(
                            optimized_ips=dict(
                                type='list',
                                elements='str'
                            )
                        )
                    ),
                    tunnel_id=dict(
                        type='int'
                    ),
                )
            ),
            mac_pool_id=dict(
                required=False,
                type='str'
            ),
            metadata_proxy_paths=dict(
                elements='str',
                type='list'
            ),
            overlay_id=dict(
                type='int'
            ),
            replication_mode=dict(
                type='str',
                default="MTEP",
                choices=["MTEP", "SOURCE"]
            ),
            site_id=dict(
                required=False,
                type='str',
            ),
            subnets=dict(
                required=False,
                type='list',
                elements='dict',
                options=dict(
                    dhcp_config=dict(
                        required=False,
                        type='dict',
                        options=dict(
                            dns_servers=dict(
                                required=False,
                                type='list',
                            ),
                            lease_time=dict(
                                required=False,
                                type='int',
                            ),
                            resource_type=dict(
                                required=True,
                                type='str',
                                choices=[
                                    'SegmentDhcpV4Config',
                                    'SegmentDhcpV6Config']
                            ),
                            server_address=dict(
                                required=False,
                                type='str',
                            ),
                            # NOTE(review): the nested specs below use
                            # 'suboptions', but AnsibleModule argument_spec
                            # validates nested fields under 'options' --
                            # as written these inner fields are likely not
                            # validated at all; confirm and align with the
                            # rest of this spec.
                            options=dict(
                                required=False,
                                type='dict',
                                suboptions=dict(
                                    option121=dict(
                                        required=False,
                                        type='dict',
                                        suboptions=dict(
                                            static_routes=dict(
                                                required=False,
                                                type='list',
                                                elements='dict',
                                                suboptions=dict(
                                                    network=dict(
                                                        required=True,
                                                    ),
                                                    next_hop=dict(
                                                        required=True,
                                                    ),
                                                ),
                                            ),
                                        ),
                                    ),
                                    others=dict(
                                        required=False,
                                        type='list',
                                        elements='dict',
                                        suboptions=dict(
                                            code=dict(
                                                required=True,
                                                type='int',
                                            ),
                                            values=dict(
                                                required=True,
                                                type='list',
                                            ),
                                        ),
                                    ),
                                ),
                            ),
                            domain_names=dict(
                                required=False,
                                type='list',
                            ),
                            excluded_ranges=dict(
                                required=False,
                                type='list',
                            ),
                            preferred_time=dict(
                                required=False,
                                type='int',
                            ),
                            sntp_servers=dict(
                                required=False,
                                type='list',
                            ),
                        ),
                    ),
                    dhcp_ranges=dict(
                        required=False,
                        type='list'
                    ),
                    gateway_address=dict(
                        required=True,
                        type='str'
                    )
                )
            ),
            tier0_display_name=dict(
                required=False,
                type='str'
            ),
            tier0_id=dict(
                required=False,
                type='str'
            ),
            tier1_display_name=dict(
                required=False,
                type='str'
            ),
            tier1_id=dict(
                required=False,
                type='str'
            ),
            transport_zone_display_name=dict(
                required=False,
                type='str'
            ),
            transport_zone_id=dict(
                required=False,
                type='str'
            ),
            vlan_ids=dict(
                required=False,
                type='list'
            ),
        )
        return segment_arg_spec

    @staticmethod
    def get_resource_base_url(baseline_args=None):
        # Segments live directly under /infra; no baseline args are needed.
        return SEGMENT_URL

    def update_resource_params(self, nsx_resource_params):
        # Convert user-friendly identifiers into the policy paths the NSX
        # REST payload expects, mutating nsx_resource_params in place.
        # tier0* wins over tier1*; both map onto connectivity_path.
        if self.do_resource_params_have_attr_with_id_or_display_name(
                "tier0"):
            tier0_id = self.get_id_using_attr_name_else_fail(
                "tier0", nsx_resource_params,
                TIER_0_URL, "Tier0")
            nsx_resource_params["connectivity_path"] = (
                TIER_0_URL + "/" + tier0_id)
        elif self.do_resource_params_have_attr_with_id_or_display_name(
                "tier1"):
            tier1_id = self.get_id_using_attr_name_else_fail(
                "tier1", nsx_resource_params, TIER_1_URL, "Tier1")
            nsx_resource_params["connectivity_path"] = (
                TIER_1_URL + "/" + tier1_id)
        if self.do_resource_params_have_attr_with_id_or_display_name(
"transport_zone"): site_id = nsx_resource_params.pop("site_id", 'default') enforcementpoint_id = nsx_resource_params.pop( "enforcementpoint_id", 'default') transport_zone_base_url = ( TRANSPORT_ZONE_URL.format(site_id, enforcementpoint_id)) transport_zone_id = self.get_id_using_attr_name_else_fail( "transport_zone", nsx_resource_params, transport_zone_base_url, "Transport Zone") nsx_resource_params["transport_zone_path"] = ( transport_zone_base_url + "/" + transport_zone_id) if 'advanced_config' in nsx_resource_params and nsx_resource_params[ 'advanced_config']: address_pool_id = None if nsx_resource_params['advanced_config'].get('address_pool_id'): address_pool_id = nsx_resource_params['advanced_config'].pop( 'address_pool_id') elif nsx_resource_params['advanced_config'].get( 'address_pool_display_name'): address_pool_id = self.get_id_from_display_name( IP_POOL_URL, nsx_resource_params['advanced_config'][ 'address_pool_display_name'], "Ip Pool", ignore_not_found_error=False) nsx_resource_params['advanced_config'].pop( 'address_pool_display_name') if address_pool_id: address_pool_paths = [IP_POOL_URL + "/" + address_pool_id] nsx_resource_params['advanced_config'][ 'address_pool_paths'] = address_pool_paths self._updateSubnetsAsPerIpvType(nsx_resource_params) def _updateSubnetsAsPerIpvType(self, nsx_resource_params): subnets = nsx_resource_params.get('subnets', []) for subnet in subnets: dhcp_config = subnet.get('dhcp_config') if dhcp_config: if dhcp_config['resource_type'] == "SegmentDhcpV4Config": self._remove_ipv6_subnet_attrs(dhcp_config) else: self._remove_ipv4_subnet_attrs(dhcp_config) def _remove_ipv6_subnet_attrs(self, dhcp_config): dhcp_config.pop('domain_names', None) dhcp_config.pop('excluded_ranges', None) dhcp_config.pop('preferred_time', None) dhcp_config.pop('sntp_servers', None) def _remove_ipv4_subnet_attrs(self, dhcp_config): dhcp_config.pop('options', None) def update_parent_info(self, parent_info): parent_info["segment_id"] = self.id class 
NSXTSegmentPort(NSXTBaseRealizableResource):
        """Subresource of NSXTSegment that manages ports on the parent Segment."""

        # NOTE(review): this instance method is shadowed by the classmethod
        # of the same name defined just below (later class-body definitions
        # overwrite earlier ones), so this def is inert; the same pattern
        # appears in the other policy modules of this collection.
        def get_spec_identifier(self):
            return NSXTSegment.NSXTSegmentPort.get_spec_identifier()

        @classmethod
        def get_spec_identifier(cls):
            # Key under which segment ports appear in the parent's params.
            return "segment_ports"

        @staticmethod
        def get_resource_spec():
            # Ansible argument spec for a single Segment Port.
            segment_port_arg_spec = {}
            segment_port_arg_spec.update(
                address_bindings=dict(
                    required=False,
                    type='list',
                    elements='dict',
                    options=dict(
                        ip_address=dict(
                            required=False,
                            type='str'
                        ),
                        mac_address=dict(
                            required=False,
                            type='str'
                        ),
                        vlan_id=dict(
                            required=False,
                            type='int'
                        )
                    )
                ),
                admin_state=dict(
                    required=False,
                    type='str',
                    default='UP',
                    choices=['UP', 'DOWN']
                ),
                attachment=dict(
                    required=False,
                    type='dict',
                    options=dict(
                        allocate_addresses=dict(
                            required=False,
                            type='str',
                            choices=['IP_POOL', 'MAC_POOL', 'BOTH', 'NONE']
                        ),
                        app_id=dict(
                            required=False,
                            type='str',
                        ),
                        context_id=dict(
                            required=False,
                            type='str',
                        ),
                        id=dict(
                            required=False,
                            type='str',
                        ),
                        traffic_tag=dict(
                            required=False,
                            type='int'
                        ),
                        type=dict(
                            required=False,
                            type='str',
                            choices=['PARENT', 'CHILD', 'INDEPENDENT']
                        )
                    )
                ),
                extra_configs=dict(
                    required=False,
                    type='list',
                    elements='dict',
                    options=dict(
                        config_pair=dict(
                            required=True,
                            type='dict',
                            options=dict(
                                key=dict(
                                    required=True,
                                    type='str'
                                ),
                                value=dict(
                                    required=True,
                                    type='str'
                                )
                            )
                        ),
                    )
                ),
                ignored_address_bindings=dict(
                    required=False,
                    type='list',
                    elements='dict',
                    options=dict(
                        ip_address=dict(
                            required=False,
                            type='str'
                        ),
                        mac_address=dict(
                            required=False,
                            type='str'
                        ),
                        vlan_id=dict(
                            required=False,
                            type='int'
                        )
                    )
                ),
                init_state=dict(
                    type='str',
                    default='UNBLOCKED_VLAN',
                    choices=['UNBLOCKED_VLAN']
                )
            )
            return segment_port_arg_spec

        @staticmethod
        def get_resource_base_url(parent_info):
            # The port URL is nested under the parent segment's ID, which
            # the parent published via update_parent_info().
            segment_id = parent_info.get("segment_id", 'default')
            return SEGMENT_PORT_URL.format(segment_id)


if __name__ == '__main__':
    segment = NSXTSegment()
    segment.realize()


================================================
FILE: plugins/modules/nsxt_policy_tier0.py
================================================
#!/usr/bin/python
# 
-*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: nsxt_policy_tier0 short_description: 'Create/Update/Delete a Tier-0 and associated resources' description: Creates/Updates/Deletes a Tier-0 resource using the Policy API. Assocaited resources include 'Tier-0 Locale Service' and 'Tier-0 Interface'. 'Tier-0 Locale Service' and 'Tier-0 Interface' attributes must be prepended with 't0ls' and 't0iface' respectively. version_added: '2.8' author: 'Gautam Verma' extends_documentation_fragment: - vmware.ansible_for_nsxt.vmware_nsxt options: id: description: Tier-0 ID required: false type: str description: description: Tier-0 description type: str default_rule_logging: description: Enable logging for whitelisted rule. Indicates if logging should be enabled for the default whitelisting rule. 
default: false ha_mode: description: High-availability Mode for Tier-0 choices: - 'ACTIVE_STANDBY' - 'ACTIVE_ACTIVE' default: 'ACTIVE_ACTIVE' type: str disable_firewall: description: Disable or enable gateway fiewall. default: False type: bool failover_mode: description: Determines the behavior when a Tier-0 instance in ACTIVE-STANDBY high-availability mode restarts after a failure. If set to PREEMPTIVE, the preferred node will take over, even if it causes another failure. If set to NON_PREEMPTIVE, then the instance that restarted will remain secondary. This property must not be populated unless the ha_mode property is set to ACTIVE_STANDBY. choices: - 'NON_PREEMPTIVE' - 'PREEMPTIVE' default: 'NON_PREEMPTIVE' type: str force_whitelisting: description: Flag to add whitelisting FW rule during realization. default: False type: bool internal_transit_subnets: description: Internal transit subnets in CIDR format. Specify subnets that are used to assign addresses to logical links connecting service routers and distributed routers. Only IPv4 addresses are supported. When not specified, subnet 169.254.0.0/ 24 is assigned by default in ACTIVE_ACTIVE HA mode or 169.254.0.0/28 in ACTIVE_STANDBY mode. default: False type: list intersite_config: description: Inter site routing configuration when the gateway is streched. type: dict suboptions: fallback_sites: description: Fallback site to be used as new primary site on current primary site failure. Disaster recovery must be initiated via API/UI. Fallback site configuration is supported only for T0 gateway. T1 gateway will follow T0 gateway's primary site during disaster recovery type: list intersite_transit_subnet: description: - Transit subnet in CIDR format - IPv4 subnet for inter-site transit segment connecting service routers across sites for stretched gateway. 
For IPv6 link local subnet is auto configured type: str default: "169.254.32.0/20" last_admin_active_epoch: description: - Epoch of last time admin changing active LocaleServices - Epoch(in seconds) is auto updated based on system current timestamp when primary locale service is updated. It is used for resolving conflict during site failover. If system clock not in sync then User can optionally override this. New value must be higher than the current value. type: int primary_site_path: description: - Primary egress site for gateway. - Primary egress site for gateway. T0/T1 gateway in Active/Standby mode supports stateful services on primary site. In this mode primary site must be set if gateway is stretched to more than one site. For T0 gateway in Active/Active primary site is optional field. If set then secondary site prefers routes learned from primary over locally learned routes. This field is not applicable for T1 gateway with no services type: str ipv6_ndra_profile_id: description: IPv6 NDRA profile configuration on Tier0. Either or both NDRA and/or DAD profiles can be configured. Related attribute ipv6_dad_profile_id. type: str ipv6_ndra_profile_display_name: description: Same as ipv6_ndra_profile_id. Either one can be specified. If both are specified, ipv6_ndra_profile_id takes precedence. type: str ipv6_dad_profile_id: description: IPv6 DRA profile configuration on Tier0. Either or both NDRA and/or DAD profiles can be configured. Related attribute ipv6_ndra_profile_id. type: str ipv6_dad_profile_display_name: description: Same as ipv6_dad_profile_id. Either one can be specified. If both are specified, ipv6_dad_profile_id takes precedence. 
type: str rd_admin_field: description: - Route distinguisher administrator address - If you are using EVPN service, then route distinguisher administrator address should be defined if you need auto generation of route distinguisher on your VRF configuration type: str transit_subnets: description: Transit subnets in CIDR format. Specify transit subnets that are used to assign addresses to logical links connecting tier-0 and tier-1s. Both IPv4 and IPv6 addresses are supported. When not specified, subnet 100.64.0.0/16 is configured by default. type: list dhcp_config_id: description: DHCP configuration for Segments connected to Tier-0. DHCP service is configured in relay mode. type: str dhcp_config_display_name: description: Same as dhcp_config_id. Either one can be specified. If both are specified, dhcp_config_id takes precedence. type: str vrf_config: type: dict description: VRF config, required for VRF Tier0 suboptions: description: description: Description of this resource type: str display_name: description: - Identifier to use when displaying entity in logs or GUI - Defaults to id if not set - Error if both not specified type: str evpn_transit_vni: description: - L3 VNI associated with the VRF for overlay traffic. - VNI must be unique and belong to configured VNI pool. type: int id: description: - Unique identifier of this resource - Defaults to display_name if not set - Error if both not specified type: str route_distinguisher: description: Route distinguisher. 'ASN:<>' or 'IPAddress:<>'. type: str route_targets: description: Route targets type: list element: dict suboptions: description: description: Description of this resource type: str display_name: description: - Identifier to use when displaying entity in logs or GUI - Defaults to id if not set - Error if both not specified type: str export_route_targets: description: Export route targets. 
'ASN:' or 'IPAddress:<>' type: list element: str id: description: - Unique identifier of this resource - Defaults to display_name if not set - Error if both not specified type: str import_route_targets: description: Import route targets. 'ASN:' or 'IPAddress:<>' type: list element: str tags: description: Opaque identifiers meaningful to the API user type: list element: dict suboptions: scope: description: Tag scope type: str tag: description: Tag value type: str tags: description: Opaque identifiers meaningful to the API user type: list element: dict suboptions: scope: description: Tag scope type: str tag: description: Tag value type: str tier0_display_name: description: Default tier0 display name. Cannot be modified after realization. Either this or tier0_id must be specified type: str tier0_id: description: Default tier0 id. Cannot be modified after realization. Either this or tier0_id must be specified type: str static_routes: type: list element: dict description: This is a list of Static Routes that need to be created, updated, or deleted suboptions: id: description: Tier-0 Static Route ID. required: false type: str display_name: description: - Tier-0 Static Route display name. - Either this or id must be specified. If both are specified, id takes precedence. required: false type: str description: description: - Tier-0 Static Route description. type: str state: description: - State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource. 
- Must be specified in order to modify the resource choices: - present - absent network: description: Network address in CIDR format required: true type: str next_hops: description: Next hop routes for network type: list elements: dict suboptions: admin_distance: description: Cost associated with next hop route type: int default: 1 ip_address: description: Next hop gateway IP address type: str scope: description: - Interface path associated with current route - For example, specify a policy path referencing the IPSec VPN Session type: list tags: description: Opaque identifiers meaningful to the API user type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str achieve_subresource_state_if_del_parent: type: bool default: false description: - Can be used to achieve the state of subresources even if the parent(base) resource's state is absent. - Can be specified for each subresource. do_wait_till_create: type: bool default: false description: - Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager bfd_peers: type: list element: dict description: This is a list of BFD Peers that need to be created, updated, or deleted suboptions: id: description: Tier-0 BFD Peer ID. required: false type: str display_name: description: - Tier-0 BFD Peer display name. - Either this or id must be specified. If both are specified, id takes precedence. required: false type: str description: description: - Tier-0 BFD Peer description. config type: str state: description: - State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource. - Must be specified in order to modify the resource choices: - present - absent bfd_profile_id: description: - The associated BFD Profile ID - Either this or bfd_profile_display_name must be specified - BFD Profile is not supported for IPv6 networks. 
type: str bfd_profile_display_name: description: - The associated BFD Profile display name - Either this or bfd_profile_id must be specified - BFD Profile is not supported for IPv6 networks. type: str enabled: description: Flag to enable BFD peer. type: list elements: dict suboptions: admin_distance: description: Cost associated with next hop route type: int default: 1 ip_address: description: Next hop gateway IP address type: str scope: description: - Interface path associated with current route - For example, specify a policy path referencing the IPSec VPN Session type: list tags: description: Opaque identifiers meaningful to the API user type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str locale_services: type: list element: dict description: This is a list of Locale Services that need to be created, updated, or deleted suboptions: id: description: Tier-0 Locale Service ID. required: false type: str display_name: description: - Tier-0 Locale Service display name. - Either this or id must be specified. If both are specified, id takes precedence required: false type: str description: description: - Tier-0 Locale Service description. type: str state: description: - State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource - Required if id is specified. choices: - present - absent tags: description: Opaque identifiers meaningful to the API user type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str create_or_update_subresource_first: type: bool default: false description: - Can be used to create subresources first. - Can be specified for each subresource. delete_subresource_first: type: bool default: true description: - Can be used to delete subresources first. - Can be specified for each subresource. 
achieve_subresource_state_if_del_parent: type: bool default: false description: - Can be used to achieve the state of subresources even if the parent(base) resource's state is absent. - Can be specified for each subresource. do_wait_till_create: type: bool default: false description: - Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager. - Can be specified for each subresource. edge_cluster_info: description: Used to create path to edge cluster. Auto-assigned if associated enforcement-point has only one edge cluster. type: dict suboptions: site_id: description: site_id where edge cluster is located default: default type: str enforcementpoint_id: description: enforcementpoint_id where edge cluster is located default: default type: str edge_cluster_id: description: ID of the edge cluster type: str edge_cluster_display_name: description: - display name of the edge cluster. - Either this or edge_cluster_id must be specified. If both are specified, edge_cluster_id takes precedence type: str preferred_edge_nodes_info: description: Used to create paths to edge nodes. Specified edge is used as preferred edge cluster member when failover mode is set to PREEMPTIVE, not applicable otherwise. type: list suboptions: site_id: description: site_id where edge node is located default: default type: str enforcementpoint_id: description: enforcementpoint_id where edge node is located default: default type: str edge_cluster_id: description: edge_cluster_id where edge node is located type: str edge_cluster_display_name: description: - display name of the edge cluster. - either this or edge_cluster_id must be specified. If both are specified, edge_cluster_id takes precedence type: str edge_node_id: description: ID of the edge node type: str edge_node_display_name: description: - Display name of the edge node. - either this or edge_node_id must be specified. 
If both are specified, edge_node_id takes precedence type: str route_redistribution_types: description: - Enable redistribution of different types of routes on Tier-0. - This property is only valid for locale-service under Tier-0. - This property is deprecated, please use "route_redistribution_config" property to configure redistribution rules. type: list choices: - TIER0_STATIC - Redistribute user added static routes. - TIER0_CONNECTED - Redistribute all subnets configured on Interfaces and routes related to TIER0_ROUTER_LINK, TIER0_SEGMENT, TIER0_DNS_FORWARDER_IP, TIER0_IPSEC_LOCAL_IP, TIER0_NAT types. - TIER1_STATIC - Redistribute all subnets and static routes advertised by Tier-1s. - TIER0_EXTERNAL_INTERFACE - Redistribute external interface subnets on Tier-0. - TIER0_LOOPBACK_INTERFACE - Redistribute loopback interface subnets on Tier-0. - TIER0_SEGMENT - Redistribute subnets configured on Segments connected to Tier-0. - TIER0_ROUTER_LINK - Redistribute router link port subnets on Tier-0. - TIER0_SERVICE_INTERFACE - Redistribute Tier0 service interface subnets. - TIER0_DNS_FORWARDER_IP - Redistribute DNS forwarder subnets. - TIER0_IPSEC_LOCAL_IP - Redistribute IPSec subnets. - TIER0_NAT - Redistribute NAT IPs owned by Tier-0. - TIER0_EVPN_TEP_IP - Redistribute EVPN local endpoint subnets on Tier-0. - TIER1_NAT - Redistribute NAT IPs advertised by Tier-1 instances. - TIER1_LB_VIP - Redistribute LB VIP IPs advertised by Tier-1 instances. - TIER1_LB_SNAT - Redistribute LB SNAT IPs advertised by Tier-1 instances. - TIER1_DNS_FORWARDER_IP - Redistribute DNS forwarder subnets on Tier-1 instances. - TIER1_CONNECTED - Redistribute all subnets configured on Segments and Service Interfaces. - TIER1_SERVICE_INTERFACE - Redistribute Tier1 service interface subnets. - TIER1_SEGMENT - Redistribute subnets configured on Segments connected to Tier1. - TIER1_IPSEC_LOCAL_ENDPOINT - Redistribute IPSec VPN local-endpoint subnets advertised by TIER1. 
route_redistribution_config: description: Configure all route redistribution properties like enable/disable redistribution, redistribution rule and so on. type: dict suboptions: bgp_enabled: description: Flag to enable route redistribution. type: bool default: false redistribution_rules: description: List of redistribution rules. type: list elements: dict suboptions: name: description: Rule name type: str route_map_path: description: Route map to be associated with the redistribution rule type: str route_redistribution_types: description: Tier-0 route redistribution types choices: - TIER0_STATIC - Redistribute user added static routes. - TIER0_CONNECTED - Redistribute all subnets configured on Interfaces and routes related to TIER0_ROUTER_LINK, TIER0_SEGMENT, TIER0_DNS_FORWARDER_IP, TIER0_IPSEC_LOCAL_IP, TIER0_NAT types. - TIER1_STATIC - Redistribute all subnets and static routes advertised by Tier-1s. - TIER0_EXTERNAL_INTERFACE - Redistribute external interface subnets on Tier-0. - TIER0_LOOPBACK_INTERFACE - Redistribute loopback interface subnets on Tier-0. - TIER0_SEGMENT - Redistribute subnets configured on Segments connected to Tier-0. - TIER0_ROUTER_LINK - Redistribute router link port subnets on Tier-0. - TIER0_SERVICE_INTERFACE - Redistribute Tier0 service interface subnets. - TIER0_DNS_FORWARDER_IP - Redistribute DNS forwarder subnets. - TIER0_IPSEC_LOCAL_IP - Redistribute IPSec subnets. - TIER0_NAT - Redistribute NAT IPs owned by Tier-0. - TIER0_EVPN_TEP_IP - Redistribute EVPN local endpoint subnets on Tier-0. - TIER1_NAT - Redistribute NAT IPs advertised by Tier-1 instances. - TIER1_LB_VIP - Redistribute LB VIP IPs advertised by Tier-1 instances. - TIER1_LB_SNAT - Redistribute LB SNAT IPs advertised by Tier-1 instances. - TIER1_DNS_FORWARDER_IP - Redistribute DNS forwarder subnets on Tier-1 instances. - TIER1_CONNECTED - Redistribute all subnets configured on Segments and Service Interfaces. 
- TIER1_SERVICE_INTERFACE - Redistribute Tier1 service interface subnets. - TIER1_SEGMENT - Redistribute subnets configured on Segments connected to Tier1. - TIER1_IPSEC_LOCAL_ENDPOINT - Redistribute IPSec VPN local-endpoint subnets advertised by TIER1. type: list ha_vip_configs: type: list elements: dict description: - Array of HA VIP Config. - This configuration can be defined only for Active-Standby Tier0 gateway to provide redundancy. For multiple external interfaces, multiple HA VIP configs must be defined and each config will pair exactly two external interfaces. The VIP will move and will always be owned by the Active node. When this property is configured, configuration of dynamic-routing is not allowed. suboptions: enabled: description: Flag to enable this HA VIP config. default: true type: bool external_interface_paths: description: - Policy paths to Tier0 external interfaces for providing redundancy - Policy paths to Tier0 external interfaces which are to be paired to provide redundancy. Floating IP will be owned by one of these interfaces depending upon which edge node is Active. type: list vip_subnets: description: - VIP floating IP address subnets - Array of IP address subnets which will be used as floating IP addresses. type: list suboptions: ip_addresses: description: IP addresses assigned to interface type: list required: true prefix_len: description: Subnet prefix length type: int required: true BGP: description: Specify the BGP spec in this section type: dict suboptions: ecmp: description: Flag to enable ECMP. type: bool required: False default: True enabled: description: Flag to enable BGP configuration. Disabling will stop feature and BGP peering. type: bool default: True graceful_restart_config: description: Configuration field to hold BGP Restart mode and timer. type: dict required: False suboptions: mode: description: - BGP Graceful Restart Configuration Mode - If mode is DISABLE, then graceful restart and helper modes are disabled. 
- If mode is GR_AND_HELPER, then both graceful restart and helper modes are enabled. - If mode is HELPER_ONLY, then helper mode is enabled. HELPER_ONLY mode is the ability for a BGP speaker to indicate its ability to preserve forwarding state during BGP restart. - GRACEFUL_RESTART mode is the ability of a BGP speaker to advertise its restart to its peers. type: str required: False default: 'HELPER_ONLY' choices: - DISABLE - GR_AND_HELPER - HELPER_ONLY timer: description: BGP Graceful Restart Timer type: dict required: False suboptions: restart_timer: description: - BGP Graceful Restart Timer - Maximum time taken (in seconds) for a BGP session to be established after a restart. This can be used to speed up routing convergence by its peer in case the BGP speaker does not come back up after a restart. If the session is not re-established within this timer, the receiving speaker will delete all the stale routes from that peer. Min 1 and Max 3600 type: int default: 180 stale_route_timer: description: - BGP Stale Route Timer - Maximum time (in seconds) before stale routes are removed from the RIB (Routing Information Base) when BGP restarts. Min 1 and Max 3600 type: int default: 600 inter_sr_ibgp: description: Flag to enable inter SR IBGP configuration. When not specified, inter SR IBGP is automatically enabled if Tier-0 is created in ACTIVE_ACTIVE ha_mode. type: bool required: False local_as_num: description: - BGP AS number in ASPLAIN/ASDOT Format. - Specify BGP AS number for Tier-0 to advertise to BGP peers. AS number can be specified in ASPLAIN (e.g., "65546") or ASDOT (e.g., "1.10") format. Empty string disables BGP feature. type: str required: True multipath_relax: description: Flag to enable BGP multipath relax option. 
type: bool default: True route_aggregations: description: List of routes to be aggregated type: dict required: False suboptions: prefix: description: CIDR of aggregate address type: str required: True summary_only: description: - Send only summarized route. - Summarization reduces number of routes advertised by representing multiple related routes with prefix property type: bool default: True neighbors: description: Specify the BGP neighbors in this section that need to be created, updated, or deleted type: list element: dict suboptions: allow_as_in: description: Flag to enable allowas_in option for BGP neighbor type: bool default: False bfd: description: - BFD configuration for failure detection - BFD is enabled with default values when not configured type: dict required: False suboptions: enabled: description: Flag to enable BFD configuration type: bool required: False interval: description: Time interval between heartbeat packets in milliseconds. Min 300 and Max 60000 type: int default: 1000 multiple: description: - Declare dead multiple. - Number of times heartbeat packet is missed before BFD declares the neighbor is down. Min 2 and Max 16 type: int default: 3 graceful_restart_mode: description: - BGP Graceful Restart Configuration Mode - If mode is DISABLE, then graceful restart and helper modes are disabled. - If mode is GR_AND_HELPER, then both graceful restart and helper modes are enabled. - If mode is HELPER_ONLY, then helper mode is enabled. HELPER_ONLY mode is the ability for a BGP speaker to indicate its ability to preserve forwarding state during BGP restart. - GRACEFUL_RESTART mode is the ability of a BGP speaker to advertise its restart to its peers. type: str choices: - DISABLE - GR_AND_HELPER - HELPER_ONLY hold_down_time: description: Wait time in seconds before declaring peer dead. Min 1 and Max 65535 type: int default: 180 keep_alive_time: description: Interval between keep alive messages sent to peer. Min 1 and Max 65535. 
type: int default: 60 maximum_hop_limit: description: Maximum number of hops allowed to reach BGP neighbor. Min 1 and Max 255 type: int default: 1 address: description: Neighbor IP Address type: str required: True password: description: Password for BGP Neighbor authentication. Empty string ("") clears existing password. type: str required: False remote_as_num: description: 4 Byte ASN of the neighbor in ASPLAIN Format type: str required: True route_filtering: description: Enable address families and route filtering in each direction type: list elements: dict required: False suboptions: address_family: description: Address family type type: str required: False choices: - 'IPV4' - 'IPV6' - 'VPN' enabled: description: Flag to enable address family type: bool default: True in_route_filters: description: - Prefix-list or route map path for IN direction - Specify path of prefix-list or route map to filter routes for IN direction. type: list required: False out_route_filters: description: - Prefix-list or route map path for OUT direction - Specify path of prefix-list or route map to filter routes for OUT direction. When not specified, a built-in prefix-list named 'prefixlist-out-default' is automatically applied. type: list required: False source_addresses: description: - Source IP Addresses for BGP peering - Source addresses should belong to Tier0 external or loopback interface IP Addresses. BGP peering is formed from all these addresses. This property is mandatory when maximum_hop_limit is greater than 1. type: list required: False interfaces: type: list element: dict description: Specify the interfaces associated with the Gateway in this section that need to be created, updated, or deleted suboptions: id: description: Tier-0 Interface ID type: str display_name: description: - Tier-0 Interface display name - Either this or id must be specified. If both are specified, id takes precedence. 
required: false type: str description: description: Tier-0 Interface description type: str state: description: - State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource. - Required if I(segp_id != null) choices: - present - absent tags: description: Opaque identifiers meaningful to the API user type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str access_vlan_id: description: Vlan id type: int ipv6_ndra_profile_display_name: description: Same as ipv6_ndra_profile_id. Either one should be specified. type: str ipv6_ndra_profile_id: description: Configuration IPv6 NDRA profile. Only one NDRA profile can be configured. type: str mtu: description: - MTU size - Maximum transmission unit (MTU) specifies the size of the largest packet that a network protocol can transmit. type: int multicast: description: Multicast PIM configuration type: dict suboptions: enabled: description: enable/disable PIM configuration type: bool default: False urpf_mode: description: Unicast Reverse Path Forwarding mode type: str choices: - NONE - STRICT default: STRICT create_or_update_subresource_first: type: bool default: false description: - Can be used to create subresources first. - Can be specified for each subresource. delete_subresource_first: type: bool default: true description: - Can be used to delete subresources first. - Can be specified for each subresource. achieve_subresource_state_if_del_parent: type: bool default: false description: - Can be used to achieve the state of subresources even if the parent(base) resource's state is absent. - Can be specified for each subresource. do_wait_till_create: type: bool default: false description: - Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager. - Can be specified for each subresource. 
segment_id: description: Specify Segment to which this interface is connected to. Required if id is specified. type: str segment_display_name: description: - Same as segment_id - Either this or segment_id must be specified. If both are specified, segment_id takes precedence. type: str type: description: Interface type choices: - "EXTERNAL" - "LOOPBACK" - "SERVICE" default: "EXTERNAL" type: str edge_node_info: description: - Info to create policy path to edge node to handle external connectivity. - Required if interface type is EXTERNAL and I(id != null) type: dict suboptions: site_id: description: site_id where edge node is located default: default type: str enforcementpoint_id: description: enforcementpoint_id where edge node is located default: default type: str edge_cluster_id: description: edge_cluster_id where edge node is located type: str edge_cluster_display_name: description: - display name of the edge cluster. - either this or edge_cluster_id must be specified. If both are specified, edge_cluster_id takes precedence type: str edge_node_id: description: ID of the edge node type: str edge_node_display_name: description: - Display name of the edge node. - either this or edge_node_id must be specified. If both are specified, edge_node_id takes precedence. type: str subnets: description: - IP address and subnet specification for interface - Specify IP address and network prefix for interface. - Required if I(id != null). 
type: list ''' EXAMPLES = ''' - name: create Tier0 nsxt_policy_tier0: hostname: "10.10.10.10" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: False display_name: test-tier0-1 state: present ha_mode: "ACTIVE_STANDBY" failover_mode: "PREEMPTIVE" disable_firewall: True force_whitelisting: True rd_admin_field: "122.34.12.124" tags: - scope: "a" tag: "b" static_routes: - state: present display_name: test-sr network: '12.12.12.0/24' next_hops: - ip_address: "192.165.1.4" bfd_peers: - state: present display_name: test-peer-1 peer_address: "192.100.100.5" bfd_profile_id: test-bfd-config locale_services: - state: present id: "test-t0ls" route_redistribution_config: redistribution_rules: - name: abc route_redistribution_types: ["TIER0_STATIC", "TIER0_NAT"] edge_cluster_info: edge_cluster_id: "7ef91a10-c780-4f48-a279-a5662db4ffa3" preferred_edge_nodes_info: - edge_cluster_id: "7ef91a10-c780-4f48-a279-a5662db4ffa3" edge_node_id: "e10c42dc-db27-11e9-8cd0-000c291af7ee" BGP: state: present local_as_num: '1211' inter_sr_ibgp: False graceful_restart_config: mode: "GR_AND_HELPER" timer: restart_timer: 12 route_aggregations: - prefix: "10.1.1.0/24" - prefix: "11.1.0.0/24" summary_only: False neighbors: - display_name: neigh1 address: "1.2.3.4" remote_as_num: "12" state: present interfaces: - id: "test-t0-t0ls-iface" display_name: "test-t0-t0ls-iface" state: present subnets: - ip_addresses: ["35.1.1.1"] prefix_len: 24 segment_id: "test-seg-4" edge_node_info: edge_cluster_id: "7ef91a10-c780-4f48-a279-a5662db4ffa3" edge_node_id: "e10c42dc-db27-11e9-8cd0-000c291af7ee" mtu: 1500 urpf_mode: "NONE" multicast: enabled: True ipv6_ndra_profile_display_name: test vrf_config: display_name: my-vrf id: my-vrf2 tier0_display_name: node-t0 tags: - scope: scope-tag-1 tag: value-tag-1 route_distinguisher: 'ASN:4000' evpn_transit_vni: 6000 ''' RETURN = '''# ''' import json import time from ansible.module_utils.basic import AnsibleModule 
from ansible.module_utils._text import to_native from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource import NSXTBaseRealizableResource from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import ( TIER_0_URL, IPV6_DAD_PROFILE_URL, IPV6_NDRA_PROFILE_URL, DHCP_RELAY_CONFIG_URL, EDGE_CLUSTER_URL, EDGE_NODE_URL, SEGMENT_URL, TIER_0_STATIC_ROUTE_URL, TIER_0_LOCALE_SERVICE_URL, TIER_0_LS_INTERFACE_URL, TIER_0_BGP_NEIGHBOR_URL, TIER_0_BFD_PEERS) class NSXTTier0(NSXTBaseRealizableResource): @staticmethod def get_resource_spec(): tier0_arg_spec = {} tier0_arg_spec.update( default_rule_logging=dict( required=False, type='bool' ), ha_mode=dict( required=False, type='str', default="ACTIVE_ACTIVE", choices=['ACTIVE_STANDBY', 'ACTIVE_ACTIVE'] ), disable_firewall=dict( required=False, type='bool', default=False ), failover_mode=dict( required=False, type='str', default='NON_PREEMPTIVE', choices=['NON_PREEMPTIVE', 'PREEMPTIVE'] ), force_whitelisting=dict( required=False, type='bool', default=False ), internal_transit_subnets=dict( required=False, type='list' ), intersite_config=dict( required=False, type='dict', options=dict( fallback_sites=dict( required=False, type='list' ), intersite_transit_subnet=dict( default="169.254.32.0/20", type='str' ), last_admin_active_epoch=dict( required=False, type='int' ), primary_site_path=dict( required=False, type='str' ), ) ), ipv6_ndra_profile_id=dict( required=False, type='str' ), ipv6_ndra_profile_display_name=dict( required=False, type='str' ), ipv6_dad_profile_id=dict( required=False, type='str' ), ipv6_dad_profile_display_name=dict( required=False, type='str' ), transit_subnets=dict( required=False, type='list' ), dhcp_config_id=dict( required=False, type='str' ), dhcp_config_display_name=dict( required=False, type='str' ), rd_admin_field=dict( required=False, type='str' ), vrf_config=dict( required=False, type='dict', options=dict( # Note that only default site_id 
and # enforcementpoint_id are used description=dict( type='str', default="" ), display_name=dict( type='str', ), evpn_transit_vni=dict( type='int' ), id=dict( type='str' ), route_distinguisher=dict( type='str' ), route_targets=dict( type='list', elements='dict', options=dict( description=dict( type='str', default="" ), display_name=dict( type='str', ), export_route_targets=dict( type='list', ), id=dict( type='str', ), import_route_targets=dict( type='list', ), tags=dict( type='list', elements='dict', options=dict( scope=dict( type='str', ), tag=dict( type='str', ), ) ), ) ), tags=dict( type='list', elements='dict', options=dict( scope=dict( type='str', ), tag=dict( type='str', ), ) ), tier0_display_name=dict( type='str' ), tier0_id=dict( type='str' ), ) ), ) return tier0_arg_spec @staticmethod def get_resource_base_url(baseline_args=None): return TIER_0_URL def update_resource_params(self, nsx_resource_params): ipv6_profile_paths = [] if self.do_resource_params_have_attr_with_id_or_display_name( "ipv6_ndra_profile"): ipv6_ndra_profile_id = self.get_id_using_attr_name_else_fail( "ipv6_ndra_profile", nsx_resource_params, IPV6_NDRA_PROFILE_URL, "Ipv6NdraProfile") ipv6_profile_paths.append( IPV6_NDRA_PROFILE_URL + "/" + ipv6_ndra_profile_id) if self.do_resource_params_have_attr_with_id_or_display_name( "ipv6_dad_profile"): ipv6_dad_profile_id = self.get_id_using_attr_name_else_fail( "ipv6_dad_profile", nsx_resource_params, IPV6_DAD_PROFILE_URL, "Ipv6DadProfile") ipv6_profile_paths.append( IPV6_DAD_PROFILE_URL + "/" + ipv6_dad_profile_id) if ipv6_profile_paths: nsx_resource_params["ipv6_profile_paths"] = ipv6_profile_paths if self.do_resource_params_have_attr_with_id_or_display_name( "dhcp_config"): dhcp_config_id = self.get_id_using_attr_name_else_fail( "dhcp_config", nsx_resource_params, DHCP_RELAY_CONFIG_URL, "DhcpRelayConfig") nsx_resource_params["dhcp_config_paths"] = [ DHCP_RELAY_CONFIG_URL + "/" + dhcp_config_id] if 'vrf_config' in nsx_resource_params: # vrf 
config is attached vrf_config = nsx_resource_params['vrf_config'] vrf_id = vrf_config.get('id') vrf_display_name = vrf_config.get('display_name') if not (vrf_display_name or vrf_id): self.exit_with_failure(msg="Please specify either the ID or " "display_name of the VRF in the " "vrf_config using id or display_name") tier0_id = vrf_config.pop('tier0_id', None) if not tier0_id: tier0_id = self.get_id_using_attr_name_else_fail( 'tier0', vrf_config, NSXTTier0.get_resource_base_url(), 'Tier0') vrf_config['tier0_path'] = ( NSXTTier0.get_resource_base_url() + "/" + tier0_id) vrf_config['resource_type'] = 'Tier0VrfConfig' if 'route_targets' in vrf_config: route_targets = vrf_config['route_targets'] or [] for route_target in route_targets: route_target['resource_type'] = 'VrfRouteTargets' def update_parent_info(self, parent_info): parent_info["tier0_id"] = self.id class NSXTTier0StaticRoutes(NSXTBaseRealizableResource): @staticmethod def get_resource_update_priority(): # Create this first return 2 def get_spec_identifier(self): return NSXTTier0.NSXTTier0StaticRoutes.get_spec_identifier() @classmethod def get_spec_identifier(cls): return "static_routes" @staticmethod def get_resource_spec(): tier0_sr_arg_spec = {} tier0_sr_arg_spec.update( network=dict( required=True, type='str' ), next_hops=dict( required=True, type='list', elements='dict', options=dict( admin_distance=dict( type='int', default=1 ), ip_address=dict( type='str' ), scope=dict( type='list', elements='str' ) ) ), ) return tier0_sr_arg_spec @staticmethod def get_resource_base_url(parent_info): tier0_id = parent_info.get("tier0_id", 'default') return TIER_0_STATIC_ROUTE_URL.format(tier0_id) def update_parent_info(self, parent_info): parent_info["sr_id"] = self.id class NSXTTier0SRBFDPeer(NSXTBaseRealizableResource): def get_spec_identifier(self): return (NSXTTier0.NSXTTier0StaticRoutes.NSXTTier0SRVFDPeer. 
get_spec_identifier()) @classmethod def get_spec_identifier(cls): return "bfd_peers" @staticmethod def get_resource_spec(): tier0_sr_bfd_peer_arg_spec = {} tier0_sr_bfd_peer_arg_spec.update( bfd_profile_id=dict( type='str' ), bfd_profile_display_name=dict( type='str' ), enabled=dict( type='bool', default=True ), peer_address=dict( type='str', required=True ), source_addresses=dict( type='list', ), ) return tier0_sr_bfd_peer_arg_spec @staticmethod def get_resource_base_url(parent_info): tier0_id = parent_info.get("tier0_id", 'default') return TIER_0_BFD_PEERS.format(tier0_id) def update_resource_params(self, nsx_resource_params): bfd_profile_id = self.get_id_using_attr_name_else_fail( "bfd_profile", nsx_resource_params, '/infra/bfd-profiles', 'BFD Profile') nsx_resource_params.pop('bfd_profile_id', None) nsx_resource_params.pop('bfd_profile_display_name', None) nsx_resource_params['bfd_profile_path'] = ( '/infra/bfd-profiles/{}'.format(bfd_profile_id)) class NSXTTier0LocaleService(NSXTBaseRealizableResource): def get_spec_identifier(self): return NSXTTier0.NSXTTier0LocaleService.get_spec_identifier() @classmethod def get_spec_identifier(cls): return "locale_services" def infer_resource_id(self, parent_info): all_locale_services = self.get_all_resources_from_nsx() if len(all_locale_services) == 0: self.module.fail_json( msg="No {} found under Tier0 gateway {}. Please specify " "the id or display_name of the LocaleService to be " "created".format( self.get_spec_identifier(), parent_info.get("tier0_id", 'default'))) if len(all_locale_services) > 1: ls_ids = [ls['id'] for ls in all_locale_services] self.module.fail_json( msg="Multiple {} found under Tier0 gateway {} with IDs " "{}. 
Please specify the id of the LocaleService " "to be updated".format( self.get_spec_identifier(), parent_info.get("tier0_id", 'default'), ls_ids)) return all_locale_services[0]['id'] @staticmethod def get_resource_spec(): tier0_ls_arg_spec = {} tier0_ls_arg_spec.update( edge_cluster_info=dict( required=False, type='dict', options=dict( # Note that only default site_id and # enforcementpoint_id are used site_id=dict( type='str', default="default" ), enforcementpoint_id=dict( type='str', default="default" ), edge_cluster_id=dict( type='str' ), edge_cluster_display_name=dict( type='str' ) ) ), preferred_edge_nodes_info=dict( required=False, type='list', elements='dict', options=dict( # Note that only default site_id and # enforcementpoint_id are used site_id=dict( type='str', default="default" ), enforcementpoint_id=dict( type='str', default="default" ), edge_cluster_id=dict( type='str' ), edge_cluster_display_name=dict( type='str' ), edge_node_id=dict( type='str' ), edge_node_display_name=dict( type='str' ) ) ), route_redistribution_types=dict( required=False, type='list', elements='str', ), route_redistribution_config=dict( type='dict', required=False, options=dict( bgp_enabled=dict( type='bool', default=False ), redistribution_rules=dict( type='list', required=False, elements='dict', options=dict( name=dict( type='str', required=False ), route_map_path=dict( type='str', required=False ), route_redistribution_types=dict( type='list', elements='str', required=False ), ) ) ) ), ha_vip_configs=dict( type='list', elements='dict', options=dict( enabled=dict( default=True, type='bool' ), external_interface_info=dict( required=True, type='list', elements='dict', options=dict( id=dict( type='str' ), display_name=dict( type='str' ) ) ), vip_subnets=dict( type='list', elements='dict', required=True, options=dict( ip_addresses=dict( type='list', required=True ), prefix_len=dict( type='int', rqeuired=True ) ) ), ) ) ) return tier0_ls_arg_spec @staticmethod def 
get_resource_base_url(parent_info): tier0_id = parent_info.get("tier0_id", 'default') return TIER_0_LOCALE_SERVICE_URL.format(tier0_id) def update_resource_params(self, nsx_resource_params): if "edge_cluster_info" in nsx_resource_params: edge_cluster_info = nsx_resource_params.pop( "edge_cluster_info") site_id = edge_cluster_info["site_id"] enforcementpoint_id = edge_cluster_info["enforcementpoint_id"] edge_cluster_base_url = ( EDGE_CLUSTER_URL.format(site_id, enforcementpoint_id)) edge_cluster_id = self.get_id_using_attr_name_else_fail( "edge_cluster", edge_cluster_info, edge_cluster_base_url, "Edge Cluster") nsx_resource_params["edge_cluster_path"] = ( edge_cluster_base_url + "/" + edge_cluster_id) if "preferred_edge_nodes_info" in nsx_resource_params: preferred_edge_nodes_info = nsx_resource_params.pop( "preferred_edge_nodes_info") nsx_resource_params["preferred_edge_paths"] = [] for preferred_edge_node_info in preferred_edge_nodes_info: site_id = preferred_edge_node_info.get( "site_id", "default") enforcementpoint_id = preferred_edge_node_info.get( "enforcementpoint_id", "default") edge_cluster_base_url = ( EDGE_CLUSTER_URL.format(site_id, enforcementpoint_id)) edge_cluster_id = self.get_id_using_attr_name_else_fail( "edge_cluster", preferred_edge_node_info, edge_cluster_base_url, "Edge Cluster") edge_node_base_url = EDGE_NODE_URL.format( site_id, enforcementpoint_id, edge_cluster_id) edge_node_id = self.get_id_using_attr_name_else_fail( "edge_node", preferred_edge_node_info, edge_node_base_url, "Edge Node") nsx_resource_params["preferred_edge_paths"].append( edge_node_base_url + "/" + edge_node_id) if 'ha_vip_configs' in nsx_resource_params: for ha_vip_config in nsx_resource_params['ha_vip_configs']: external_interface_info = ha_vip_config.pop( 'external_interface_info') external_interface_paths = [] for external_interface in ( external_interface_info): interface_base_url = ( NSXTTier0.NSXTTier0LocaleService. 
NSXTTier0Interface.get_resource_base_url( self.get_parent_info())) external_interface_paths.append( interface_base_url + "/" + self.get_id_using_attr_name_else_fail( None, external_interface, interface_base_url, NSXTTier0.NSXTTier0LocaleService. NSXTTier0Interface.__name__)) ha_vip_config[ 'external_interface_paths'] = external_interface_paths def update_parent_info(self, parent_info): parent_info["ls_id"] = self.id class NSXTTier0Interface(NSXTBaseRealizableResource): def get_spec_identifier(self): return ( NSXTTier0.NSXTTier0LocaleService.NSXTTier0Interface. get_spec_identifier()) @classmethod def get_spec_identifier(cls): return "interfaces" @staticmethod def get_resource_spec(): tier0_ls_int_arg_spec = {} tier0_ls_int_arg_spec.update( access_vlan_id=dict( type='int' ), ipv6_ndra_profile_display_name=dict( type='str' ), ipv6_ndra_profile_id=dict( type='str' ), mtu=dict( type='int' ), multicast=dict( type='dict', suboptions=dict( enabled=dict( type='bool', default=False ) ) ), segment_id=dict( type='str' ), segment_display_name=dict( type='str' ), edge_node_info=dict( required=True, type='dict', options=dict( site_id=dict( type='str', default="default" ), enforcementpoint_id=dict( type='str', default="default" ), edge_cluster_id=dict( type='str' ), edge_cluster_display_name=dict( type='str' ), edge_node_id=dict( type='str' ), edge_node_display_name=dict( type='str' ) ) ), subnets=dict( required=True, type='list', elements='dict', options=dict( ip_addresses=dict( type='list', elements='str' ), prefix_len=dict( type='int' ) ) ), type=dict( type='str', default="EXTERNAL", choices=["EXTERNAL", "SERVICE", "LOOPBACK"] ), urpf_mode=dict( type='str', default='STRICT', choices=['NONE', 'STRICT'] ) ) return tier0_ls_int_arg_spec @staticmethod def get_resource_base_url(parent_info): tier0_id = parent_info.get("tier0_id", 'default') locale_service_id = parent_info.get("ls_id", 'default') return TIER_0_LS_INTERFACE_URL.format( tier0_id, locale_service_id) def 
update_resource_params(self, nsx_resource_params): ipv6_profile_paths = [] if self.do_resource_params_have_attr_with_id_or_display_name( "ipv6_ndra_profile"): ipv6_ndra_profile_id = ( self.get_id_using_attr_name_else_fail( "ipv6_ndra_profile", nsx_resource_params, IPV6_NDRA_PROFILE_URL, "Ipv6NdraProfile")) ipv6_profile_paths.append( IPV6_NDRA_PROFILE_URL + "/" + ipv6_ndra_profile_id) if ipv6_profile_paths: nsx_resource_params[ "ipv6_profile_paths"] = ipv6_profile_paths # segment_id is a required attr segment_id = self.get_id_using_attr_name_else_fail( "segment", nsx_resource_params, SEGMENT_URL, "Segment") nsx_resource_params["segment_path"] = ( SEGMENT_URL + "/" + segment_id) # edge_node_info is a required attr edge_node_info = nsx_resource_params.pop("edge_node_info") site_id = edge_node_info.get("site_id", "default") enforcementpoint_id = edge_node_info.get( "enforcementpoint_id", "default") edge_cluster_base_url = ( EDGE_CLUSTER_URL.format(site_id, enforcementpoint_id)) edge_cluster_id = self.get_id_using_attr_name_else_fail( "edge_cluster", edge_node_info, edge_cluster_base_url, "Edge Cluster") edge_node_base_url = EDGE_NODE_URL.format( site_id, enforcementpoint_id, edge_cluster_id) edge_node_id = self.get_id_using_attr_name_else_fail( "edge_node", edge_node_info, edge_node_base_url, 'Edge Node') nsx_resource_params["edge_path"] = ( edge_node_base_url + "/" + edge_node_id) class NSXTTier0LocaleServiceBGP(NSXTBaseRealizableResource): def __init__(self): self.id = 'bgp' super().__init__() def skip_delete(self): return True def get_spec_identifier(self): return ( NSXTTier0.NSXTTier0LocaleService.NSXTTier0LocaleServiceBGP. 
get_spec_identifier()) @classmethod def get_spec_identifier(cls): return "BGP" @staticmethod def get_resource_spec(): tier0_ls_arg_spec = {} tier0_ls_arg_spec.update( ecmp=dict( default=True, type='bool' ), enabled=dict( default=True, type='bool' ), graceful_restart_config=dict( required=False, type='dict', options=dict( mode=dict( required=False, type='str', choices=['DISABLE', 'GR_AND_HELPER', 'HELPER_ONLY'], default='HELPER_ONLY' ), timer=dict( required=False, type='dict', options=dict( restart_timer=dict( required=False, type='int', default=180 ), stale_route_timer=dict( required=False, type='int', default=600 ), ) ) ) ), inter_sr_ibgp=dict( required=False, type='bool' ), local_as_num=dict( type='str' ), multipath_relax=dict( type='bool', default=True ), route_aggregations=dict( required=False, type='list', elements='dict', options=dict( prefix=dict( required=True, type='str' ), summary_only=dict( type='bool', default=True ) ) ) ) return tier0_ls_arg_spec @staticmethod def get_resource_base_url(parent_info): tier0_id = parent_info.get("tier0_id", 'default') locale_service_id = parent_info.get("ls_id", 'default') return (TIER_0_LOCALE_SERVICE_URL + '/{}').format( tier0_id, locale_service_id) @classmethod def allows_multiple_resource_spec(cls): return False class NSXTTier0LocaleServiceBGPNeighbor( NSXTBaseRealizableResource): def get_spec_identifier(self): return ( NSXTTier0.NSXTTier0LocaleService. NSXTTier0LocaleServiceBGP. 
get_spec_identifier()) @classmethod def get_spec_identifier(cls): return "neighbors" @staticmethod def get_resource_spec(): tier0_ls_arg_spec = {} tier0_ls_arg_spec.update( allow_as_in=dict( default=False, type='bool' ), bfd=dict( type='dict', required=False, options=dict( enabled=dict( required=False, default=False, type='bool' ), interval=dict( required=False, type='int', default=1000 ), multiple=dict( required=False, type='int', default=3 ) ) ), graceful_restart_mode=dict( type='str', required=False, choices=['DISABLE', 'GR_AND_HELPER', 'HELPER_ONLY'] ), hold_down_time=dict( type='int', default=180 ), keep_alive_time=dict( type='int', default=60 ), maximum_hop_limit=dict( type='int', default=1 ), password=dict( type='str', required=False ), remote_as_num=dict( required=True, type='str' ), route_filtering=dict( required=False, type='list', elements='dict', options=dict( address_family=dict( required=False, type='str', choices=['IPV4', 'IPV6', 'VPN'] ), enabled=dict( type='bool', default=True, required=False ), in_route_filters=dict( type='list', required=False ), out_route_filters=dict( type='list', required=False ) ) ), source_addresses=dict( required=False, type='list' ), neighbor_address=dict( required=True, type='str' ) ) return tier0_ls_arg_spec @staticmethod def get_resource_base_url(parent_info): tier0_id = parent_info.get("tier0_id", 'default') locale_service_id = parent_info.get("ls_id", 'default') return TIER_0_BGP_NEIGHBOR_URL.format( tier0_id, locale_service_id) if __name__ == '__main__': nsxt_tier0 = NSXTTier0() nsxt_tier0.realize() ================================================ FILE: plugins/modules/nsxt_policy_tier1.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: nsxt_policy_tier1 short_description: 'Create/Update/Delete a Tier-1 and associated resources' description: Creates/Updates/Deletes a Tier-1 resource using the Policy API. Assocaited resources include 'Tier-1 Locale Service' and 'Tier-1 Interface'. 'Tier-1 Locale Service' and 'Tier-1 Interface' attributes must be prepended with 't1ls' and 't1iface' respectively. version_added: '2.8' author: 'Gautam Verma' extends_documentation_fragment: - vmware.ansible_for_nsxt.vmware_nsxt options: id: description: Tier-1 ID required: false type: str description: description: Tier-1 description type: str default_rule_logging: description: Enable logging for whitelisted rule. Indicates if logging should be enabled for the default whitelisting rule. default: false disable_firewall: description: Disable or enable gateway fiewall. 
default: False type: bool failover_mode: description: Determines the behavior when a Tier-1 instance in ACTIVE-STANDBY high-availability mode restarts after a failure. If set to PREEMPTIVE, the preferred node will take over, even if it causes another failure. If set to NON_PREEMPTIVE, then the instance that restarted will remain secondary. This property must not be populated unless the ha_mode property is set to ACTIVE_STANDBY. choices: - 'NON_PREEMPTIVE' - 'PREEMPTIVE' default: 'NON_PREEMPTIVE' type: str enable_standby_relocation: description: - Flag to enable standby service router relocation. - Standby relocation is not enabled until edge cluster is configured for Tier1. type: bool default: false force_whitelisting: description: Flag to add whitelisting FW rule during realization. default: False type: bool intersite_config: description: Inter site routing configuration when the gateway is streched. type: dict suboptions: fallback_sites: description: Fallback site to be used as new primary site on current primary site failure. Disaster recovery must be initiated via API/UI. Fallback site configuration is supported only for T0 gateway. T1 gateway will follow T0 gateway's primary site during disaster recovery type: list intersite_transit_subnet: description: - Transit subnet in CIDR format - IPv4 subnet for inter-site transit segment connecting service routers across sites for stretched gateway. For IPv6 link local subnet is auto configured type: str default: "169.254.32.0/20" last_admin_active_epoch: description: - Epoch of last time admin changing active LocaleServices - Epoch(in seconds) is auto updated based on system current timestamp when primary locale service is updated. It is used for resolving conflict during site failover. If system clock not in sync then User can optionally override this. New value must be higher than the current value. type: int primary_site_path: description: - Primary egress site for gateway. - Primary egress site for gateway. 
T0/T1 gateway in Active/Standby mode supports stateful services on primary site. In this mode primary site must be set if gateway is stretched to more than one site. For T0 gateway in Active/Active primary site is optional field. If set then secondary site prefers routes learned from primary over locally learned routes. This field is not applicable for T1 gateway with no services type: str ipv6_ndra_profile_id: description: IPv6 NDRA profile configuration on Tier1. Either or both NDRA and/or DAD profiles can be configured. Related attribute ipv6_dad_profile_id. type: str ipv6_ndra_profile_display_name: description: Same as ipv6_ndra_profile_id. Either one can be specified. If both are specified, ipv6_ndra_profile_id takes precedence. type: str ipv6_dad_profile_id: description: IPv6 DRA profile configuration on Tier1. Either or both NDRA and/or DAD profiles can be configured. Related attribute ipv6_ndra_profile_id. type: str ipv6_dad_profile_display_name: description: Same as ipv6_dad_profile_id. Either one can be specified. If both are specified, ipv6_dad_profile_id takes precedence. type: str dhcp_config_id: description: DHCP configuration for Segments connected to Tier-1. DHCP service is configured in relay mode. type: str dhcp_config_display_name: description: Same as dhcp_config_id. Either one can be specified. If both are specified, dhcp_config_id takes precedence. type: str pool_allocation: description: - Edge node allocation size - Supports edge node allocation at different sizes for routing and load balancer service to meet performance and scalability requirements. - ROUTING - Allocate edge node to provide routing services. - LB_SMALL, LB_MEDIUM, LB_LARGE, LB_XLARGE - Specify size of load balancer service that will be configured on TIER1 gateway. type: str choices: - ROUTING - LB_SMALL - LB_MEDIUM - LB_LARGE - LB_XLARGE default: ROUTING qos_profile: description: QoS Profile configuration for Tier1 router link connected to Tier0 gateway. 
type: dict suboptions: egress_qos_profile_path: description: Policy path to gateway QoS profile in egress direction. type: str ingress_qos_profile_path: description: Policy path to gateway QoS profile in ingress direction. type: str route_advertisement_rules: description: Route advertisement rules and filtering type: list suboptions: action: description: - Action to advertise filtered routes to the connected Tier0 gateway. choices: - PERMIT: Enables the advertisment - DENY: Disables the advertisement type: str required: true name: description: Display name for rule type: str required: true prefix_operator: description: - Prefix operator to filter subnets. - GE prefix operator filters all the routes with prefix length greater than or equal to the subnets configured. - EQ prefix operator filter all the routes with prefix length equal to the subnets configured. type: str choices: - GE - EQ route_advertisement_types: description: - Enable different types of route advertisements. - By default, Routes to IPSec VPN local-endpoint subnets (TIER1_IPSEC_LOCAL_ENDPOINT) are advertised if no value is supplied here. type: list choices: - 'TIER1_STATIC_ROUTES' - 'TIER1_CONNECTED' - 'TIER1_NAT' - 'TIER1_LB_VIP' - 'TIER1_LB_SNAT' - 'TIER1_DNS_FORWARDER_IP' - 'TIER1_IPSEC_LOCAL_ENDPOINT' subnets: description: Network CIDRs to be routed. type: list route_advertisement_types: description: - Enable different types of route advertisements. - By default, Routes to IPSec VPN local-endpoint subnets (TIER1_IPSEC_LOCAL_ENDPOINT) are advertised if no value is supplied here. type: list choices: - 'TIER1_STATIC_ROUTES' - 'TIER1_CONNECTED' - 'TIER1_NAT' - 'TIER1_LB_VIP' - 'TIER1_LB_SNAT' - 'TIER1_DNS_FORWARDER_IP' - 'TIER1_IPSEC_LOCAL_ENDPOINT' tier0_id: description: Tier-1 connectivity to Tier-0 type: str tier0_display_name: description: Same as tier0_id. Either one can be specified. If both are specified, tier0_id takes precedence. 
type: str static_routes: type: list element: dict description: This is a list of Static Routes that need to be created, updated, or deleted suboptions: id: description: Tier-1 Static Route ID. required: false type: str display_name: description: - Tier-1 Static Route display name. - Either this or id must be specified. If both are specified, id takes precedence. required: false type: str description: description: - Tier-1 Static Route description. type: str state: description: - State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource. - Must be specified in order to modify the resource choices: - present - absent network: description: Network address in CIDR format required: true type: str next_hops: description: Next hop routes for network type: list elements: dict suboptions: admin_distance: description: Cost associated with next hop route type: int default: 1 ip_address: description: Next hop gateway IP address type: str scope: description: - Interface path associated with current route - For example, specify a policy path referencing the IPSec VPN Session type: list tags: description: Opaque identifiers meaningful to the API user type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str achieve_subresource_state_if_del_parent: type: bool default: false description: - Can be used to achieve the state of subresources even if the parent(base) resource's state is absent. - Can be specified for each subresource. 
do_wait_till_create: type: bool default: false description: - Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager locale_services: type: list element: dict description: This is a list of Locale Services that need to be created, updated, or deleted suboptions: id: description: Tier-1 Locale Service ID type: str display_name: description: - Tier-1 Locale Service display name. - Either this or id must be specified. If both are specified, id takes precedence. required: false type: str description: description: Tier-1 Locale Service description type: str state: description: - State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource. - Required if I(segp_id != null) choices: - present - absent tags: description: Opaque identifiers meaningful to the API user. type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str achieve_subresource_state_if_del_parent: type: bool default: false description: - Can be used to achieve the state of subresources even if the parent(base) resource's state is absent. - Can be specified for each subresource. do_wait_till_create: type: bool default: false description: - Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager edge_cluster_info: description: Used to create path to edge cluster. Auto-assigned if associated enforcement-point has only one edge cluster. type: dict suboptions: site_id: description: site_id where edge cluster is located default: default type: str enforcementpoint_id: description: enforcementpoint_id where edge cluster is located default: default type: str edge_cluster_id: description: ID of the edge cluster required: true type: str edge_cluster_display_name: description: - display name of the edge cluster. 
- Either this or edge_cluster_id must be specified. If both are specified, edge_cluster_id takes precedence type: str preferred_edge_nodes_info: description: Used to create paths to edge nodes. Specified edge is used as preferred edge cluster member when failover mode is set to PREEMPTIVE, not applicable otherwise. type: list suboptions: site_id: description: site_id where edge node is located default: default type: str enforcementpoint_id: description: enforcementpoint_id where edge node is located default: default type: str edge_cluster_id: description: edge_cluster_id where edge node is located required: true type: str edge_cluster_display_name: description: - display name of the edge cluster. - either this or edge_cluster_id must be specified. If both are specified, edge_cluster_id takes precedence type: str edge_node_id: description: ID of the edge node type: str edge_node_display_name: description: - Display name of the edge node. - either this or edge_node_id must be specified. If both are specified, edge_node_id takes precedence type: str route_redistribution_types: description: - Enable redistribution of different types of routes on Tier-0. - This property is only valid for locale-service under Tier-0. - This property is deprecated, please use "route_redistribution_config" property to configure redistribution rules. choices: - TIER0_STATIC - Redistribute user added static routes. - TIER0_CONNECTED - Redistribute all subnets configured on Interfaces and routes related to TIER0_ROUTER_LINK, TIER0_SEGMENT, TIER0_DNS_FORWARDER_IP, TIER0_IPSEC_LOCAL_IP, TIER0_NAT types. - TIER1_STATIC - Redistribute all subnets and static routes advertised by Tier-1s. - TIER0_EXTERNAL_INTERFACE - Redistribute external interface subnets on Tier-0. - TIER0_LOOPBACK_INTERFACE - Redistribute loopback interface subnets on Tier-0. - TIER0_SEGMENT - Redistribute subnets configured on Segments connected to Tier-0. - TIER0_ROUTER_LINK - Redistribute router link port subnets on Tier-0. 
- TIER0_SERVICE_INTERFACE - Redistribute Tier0 service interface subnets. - TIER0_DNS_FORWARDER_IP - Redistribute DNS forwarder subnets. - TIER0_IPSEC_LOCAL_IP - Redistribute IPSec subnets. - TIER0_NAT - Redistribute NAT IPs owned by Tier-0. - TIER0_EVPN_TEP_IP - Redistribute EVPN local endpoint subnets on Tier-0. - TIER1_NAT - Redistribute NAT IPs advertised by Tier-1 instances. - TIER1_LB_VIP - Redistribute LB VIP IPs advertised by Tier-1 instances. - TIER1_LB_SNAT - Redistribute LB SNAT IPs advertised by Tier-1 instances. - TIER1_DNS_FORWARDER_IP - Redistribute DNS forwarder subnets on Tier-1 instances. - TIER1_CONNECTED - Redistribute all subnets configured on Segments and Service Interfaces. - TIER1_SERVICE_INTERFACE - Redistribute Tier1 service interface subnets. - TIER1_SEGMENT - Redistribute subnets configured on Segments connected to Tier1. - TIER1_IPSEC_LOCAL_ENDPOINT - Redistribute IPSec VPN local-endpoint subnets advertised by TIER1. type: list route_redistribution_config: description: Configure all route redistribution properties like enable/disable redistributon, redistribution rule and so on. type: dict suboptions: bgp_enabled: description: Flag to enable route redistribution. type: bool default: false redistribution_rules: description: List of redistribution rules. type: list elements: dict suboptions: name: description: Rule name type: str route_map_path: description: Route map to be associated with the redistribution rule type: str route_redistribution_types: description: Tier-0 route redistribution types choices: - TIER0_STATIC - Redistribute user added static routes. - TIER0_CONNECTED - Redistribute all subnets configured on Interfaces and routes related to TIER0_ROUTER_LINK, TIER0_SEGMENT, TIER0_DNS_FORWARDER_IP, TIER0_IPSEC_LOCAL_IP, TIER0_NAT types. - TIER1_STATIC - Redistribute all subnets and static routes advertised by Tier-1s. - TIER0_EXTERNAL_INTERFACE - Redistribute external interface subnets on Tier-0. 
- TIER0_LOOPBACK_INTERFACE - Redistribute loopback interface subnets on Tier-0. - TIER0_SEGMENT - Redistribute subnets configured on Segments connected to Tier-0. - TIER0_ROUTER_LINK - Redistribute router link port subnets on Tier-0. - TIER0_SERVICE_INTERFACE - Redistribute Tier0 service interface subnets. - TIER0_DNS_FORWARDER_IP - Redistribute DNS forwarder subnets. - TIER0_IPSEC_LOCAL_IP - Redistribute IPSec subnets. - TIER0_NAT - Redistribute NAT IPs owned by Tier-0. - TIER0_EVPN_TEP_IP - Redistribute EVPN local endpoint subnets on Tier-0. - TIER1_NAT - Redistribute NAT IPs advertised by Tier-1 instances. - TIER1_LB_VIP - Redistribute LB VIP IPs advertised by Tier-1 instances. - TIER1_LB_SNAT - Redistribute LB SNAT IPs advertised by Tier-1 instances. - TIER1_DNS_FORWARDER_IP - Redistribute DNS forwarder subnets on Tier-1 instances. - TIER1_CONNECTED - Redistribute all subnets configured on Segments and Service Interfaces. - TIER1_SERVICE_INTERFACE - Redistribute Tier1 service interface subnets. - TIER1_SEGMENT - Redistribute subnets configured on Segments connected to Tier1. - TIER1_IPSEC_LOCAL_ENDPOINT - Redistribute IPSec VPN local-endpoint subnets advertised by TIER1. type: list ha_vip_configs: type: list elements: dict description: - Array of HA VIP Config. - This configuration can be defined only for Active-Standby Tier0 gateway to provide redundancy. For mulitple external interfaces, multiple HA VIP configs must be defined and each config will pair exactly two external interfaces. The VIP will move and will always be owned by the Active node. When this property is configured, configuration of dynamic-routing is not allowed. suboptions: enabled: description: Flag to enable this HA VIP config. default: true type: bool external_interface_paths: description: - Policy paths to Tier0 external interfaces for providing redundancy - Policy paths to Tier0 external interfaces which are to be paired to provide redundancy. 
Floating IP will be owned by one of these interfaces depending upon which edge node is Active. type: list vip_subnets: description: - VIP floating IP address subnets - Array of IP address subnets which will be used as floating IP addresses. type: list suboptions: ip_addresses: description: IP addresses assigned to interface type: list required: true prefix_len: description: Subnet prefix length type: int required: true interfaces: type: list element: dict description: Specify the interfaces associated with the Gateway in this section that need to be created, updated, or deleted suboptions: id: description: Tier-1 Interface ID required: false type: str description: description: Tier-1 Interface description type: str display_name: description: - Tier-1 Interface display name - Either this or id must be specified. If both are specified, id takes precedence. required: false type: str state: description: - State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource. - Required if I(segp_id != null). choices: - present - absent tags: description: Opaque identifiers meaningful to the API user type: dict suboptions: scope: description: Tag scope. required: true type: str tag: description: Tag value. required: true type: str achieve_subresource_state_if_del_parent: type: bool default: false description: - Can be used to achieve the state of subresources even if the parent(base) resource's state is absent. - Can be specified for each subresource. do_wait_till_create: type: bool default: false description: - Can be used to wait for the realization of subresource before the request to create the next resource is sent to the Manager ipv6_ndra_profile_id: description: - Configrue IPv6 NDRA profile. 
Only one NDRA profile can be configured - Required if I(id != null) type: str mtu: description: - MTU size - Maximum transmission unit (MTU) specifies the size of the largest packet that a network protocol can transmit. type: int segment_id: description: - Specify Segment to which this interface is connected to. - Required if I(id != null) type: str segment_display_name: description: - Same as segment_id - Either this or segment_id must be specified. If both are specified, segment_id takes precedence. type: str subnets: description: - IP address and subnet specification for interface - Specify IP address and network prefix for interface - Required if I(id != null) type: list elements: dict suboptions: ip_addresses: description: IP addresses assigned to interface type: str prefix_len: description: Subnet prefix length type: str urpf_mode: description: Unicast Reverse Path Forwarding mode type: str choices: - NONE - STRICT default: STRICT ''' EXAMPLES = ''' - name: create Tier1 nsxt_policy_tier1: hostname: "10.10.10.10" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: False display_name: test-tier22222 state: present failover_mode: "PREEMPTIVE" disable_firewall: True force_whitelisting: True enable_standby_relocation: False tags: - scope: "a" tag: "b" route_advertisement_rules: - name: "test-route-advertisement-rules" route_advertisement_types: ['TIER1_STATIC_ROUTES', 'TIER1_CONNECTED'] subnets: ["35.1.1.1/23"] route_advertisement_types: - "TIER1_STATIC_ROUTES" - "TIER1_CONNECTED" - "TIER1_NAT" tier0_display_name: "node-t0" locale_services: - state: present display_name: test-t1ls-2 route_redistribution_config: redistribution_rules: - name: abc route_redistribution_types: ["TIER0_STATIC", "TIER0_NAT"] interfaces: - id: "test-t1-t1ls-iface-2" display_name: "test-t1-t1ls-iface" state: present subnets: - ip_addresses: ["35.1.1.1"] prefix_len: 24 segment_id: "test-seg-2" ipv6_ndra_profile_id: test mtu: 1400 
urpf_mode: NONE ''' RETURN = '''# ''' import json import time from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import _ANSIBLE_ARGS as ANSIBLE_ARGS from ansible.module_utils._text import to_native from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource import NSXTBaseRealizableResource from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import ( TIER_0_URL, TIER_1_URL, IPV6_DAD_PROFILE_URL, IPV6_NDRA_PROFILE_URL, DHCP_RELAY_CONFIG_URL, EDGE_CLUSTER_URL, EDGE_NODE_URL, SEGMENT_URL, TIER_1_STATIC_ROUTE_URL, TIER_1_LOCALE_SERVICE_URL, TIER_1_LS_INTERFACE_URL, TIER_0_LOCALE_SERVICE_URL, TIER_0_LS_INTERFACE_URL) class NSXTTier1(NSXTBaseRealizableResource): @staticmethod def get_resource_spec(): tier1_arg_spec = {} tier1_arg_spec.update( default_rule_logging=dict( required=False, type='bool' ), dhcp_config_id=dict( required=False, type='str' ), dhcp_config_display_name=dict( required=False, type='str' ), disable_firewall=dict( required=False, type='bool', default=False ), enable_standby_relocation=dict( required=False, type='bool', default=False ), failover_mode=dict( required=False, type='str', default='NON_PREEMPTIVE', choices=['NON_PREEMPTIVE', 'PREEMPTIVE'] ), force_whitelisting=dict( required=False, type='bool', default=False ), intersite_config=dict( required=False, type='dict', options=dict( fallback_sites=dict( required=False, type='list' ), intersite_transit_subnet=dict( default="169.254.32.0/20", type='str' ), last_admin_active_epoch=dict( required=False, type='int' ), primary_site_path=dict( required=False, type='str' ), ) ), ipv6_ndra_profile_id=dict( required=False, type='str' ), ipv6_ndra_profile_display_name=dict( required=False, type='str' ), ipv6_dad_profile_id=dict( required=False, type='str' ), ipv6_dad_profile_display_name=dict( required=False, type='str' ), pool_allocation=dict( type='str', choices=['ROUTING', 'LB_SMALL', 'LB_MEDIUM', 'LB_LARGE', 
'LB_XLARGE'], default='ROUTING' ), qos_profile=dict( type='dict', options=dict( egress_qos_profile_path=dict( type='str' ), ingress_qos_profile_path=dict( type='str' ) ) ), route_advertisement_rules=dict( required=False, type='list', options=dict( action=dict( required=False, type='str', default='PERMIT', choices=['PERMIT', 'DENY'] ), name=dict( required=True, type='str' ), prefix_operator=dict( required=False, type='str', default='GE', choices=['GE', 'EQ'] ), route_advertisement_types=dict( required=False, type='list', choices=['TIER1_STATIC_ROUTES', 'TIER1_CONNECTED', 'TIER1_NAT', 'TIER1_LB_VIP', 'TIER1_LB_SNAT', 'TIER1_DNS_FORWARDER_IP', 'TIER1_IPSEC_LOCAL_ENDPOINT'] ), subnets=dict( required=True, type='list' ) ) ), route_advertisement_types=dict( required=False, type='list', choices=['TIER1_STATIC_ROUTES', 'TIER1_CONNECTED', 'TIER1_NAT', 'TIER1_LB_VIP', 'TIER1_LB_SNAT', 'TIER1_DNS_FORWARDER_IP', 'TIER1_IPSEC_LOCAL_ENDPOINT' ] ), tier0_id=dict( required=False, type='str' ), tier0_display_name=dict( required=False, type='str' ) ) return tier1_arg_spec @staticmethod def get_resource_base_url(baseline_args=None): return TIER_1_URL def update_resource_params(self, nsx_resource_params): ipv6_profile_paths = [] if self.do_resource_params_have_attr_with_id_or_display_name( "ipv6_ndra_profile"): ipv6_ndra_profile_id = self.get_id_using_attr_name_else_fail( "ipv6_ndra_profile", nsx_resource_params, IPV6_NDRA_PROFILE_URL, "Ipv6NdraProfile") ipv6_profile_paths.append( IPV6_NDRA_PROFILE_URL + "/" + ipv6_ndra_profile_id) if self.do_resource_params_have_attr_with_id_or_display_name( "ipv6_dad_profile"): ipv6_dad_profile_id = self.get_id_using_attr_name_else_fail( "ipv6_dad_profile", nsx_resource_params, IPV6_DAD_PROFILE_URL, "Ipv6DadProfile") ipv6_profile_paths.append( IPV6_DAD_PROFILE_URL + "/" + ipv6_dad_profile_id) if ipv6_profile_paths: nsx_resource_params["ipv6_profile_paths"] = ipv6_profile_paths if self.do_resource_params_have_attr_with_id_or_display_name( 
"dhcp_config"): dhcp_config_id = self.get_id_using_attr_name_else_fail( "dhcp_config", nsx_resource_params, DHCP_RELAY_CONFIG_URL, "DhcpRelayConfig") nsx_resource_params["dhcp_config_paths"] = [ DHCP_RELAY_CONFIG_URL + "/" + dhcp_config_id] if self.do_resource_params_have_attr_with_id_or_display_name( "tier0"): tier0_id = self.get_id_using_attr_name_else_fail( "tier0", nsx_resource_params, TIER_0_URL, "Tier0") nsx_resource_params["tier0_path"] = ( TIER_0_URL + "/" + tier0_id) def update_parent_info(self, parent_info): parent_info["tier1_id"] = self.id class NSXTTier1StaticRoutes(NSXTBaseRealizableResource): def get_spec_identifier(self): return NSXTTier1.NSXTTier1StaticRoutes.get_spec_identifier() @classmethod def get_spec_identifier(cls): return "static_routes" @staticmethod def get_resource_spec(): tier1_sr_arg_spec = {} tier1_sr_arg_spec.update( network=dict( required=True, type='str' ), next_hops=dict( required=True, type='list', elements='dict', options=dict( admin_distance=dict( type='int', default=1 ), ip_address=dict( type='str' ), scope=dict( type='list', elements='str' ) ) ), ) return tier1_sr_arg_spec @staticmethod def get_resource_base_url(parent_info): tier1_id = parent_info.get("tier1_id", 'default') return TIER_1_STATIC_ROUTE_URL.format(tier1_id) class NSXTTier1LocaleService(NSXTBaseRealizableResource): def get_spec_identifier(self): return NSXTTier1.NSXTTier1LocaleService.get_spec_identifier() @classmethod def get_spec_identifier(cls): return "locale_services" def infer_resource_id(self, parent_info): all_locale_services = self.get_all_resources_from_nsx() if len(all_locale_services) == 0: self.module.fail_json( msg="No {} found under Tier1 gateway {}. 
Please specify " "the id or display_name of the LocaleService to be " "created".format( self.get_spec_identifier(), parent_info.get("tier1_id", 'default'))) if len(all_locale_services) > 1: ls_ids = [ls['id'] for ls in all_locale_services] self.module.fail_json( msg="Multiple {} found under Tier1 gateway {} with IDs " "{}. Please specify the id of the LocaleService " "to be updated".format( self.get_spec_identifier(), parent_info.get("tier1_id", 'default'), ls_ids)) return all_locale_services[0]['id'] @staticmethod def get_resource_spec(): tier1_ls_arg_spec = {} tier1_ls_arg_spec.update( edge_cluster_info=dict( required=False, type='dict', options=dict( # Note that only default site_id and # enforcementpoint_id are used site_id=dict( type='str', default="default" ), enforcementpoint_id=dict( type='str', default="default" ), edge_cluster_id=dict( type='str' ), edge_cluster_display_name=dict( type='str' ) ) ), preferred_edge_nodes_info=dict( required=False, type='list', options=dict( # Note that only default site_id and # enforcementpoint_id are used site_id=dict( type='str', default="default" ), enforcementpoint_id=dict( type='str', default="default" ), edge_cluster_id=dict( type='str' ), edge_cluster_display_name=dict( type='str' ), edge_node_id=dict( type='str' ), edge_node_display_name=dict( type='str' ) ) ), route_redistribution_types=dict( required=False, type='list', elements='str', ), route_redistribution_config=dict( type='dict', required=False, options=dict( bgp_enabled=dict( type='bool', default=False ), redistribution_rules=dict( type='list', required=False, elements='dict', options=dict( name=dict( type='str', required=False ), route_map_path=dict( type='str', required=False ), route_redistribution_types=dict( type='list', elements='str', required=False ), ) ) ) ), ha_vip_configs=dict( type='list', elements='dict', options=dict( enabled=dict( default=True, type='bool' ), external_interface_info=dict( required=True, type='list', elements='dict', 
options=dict( tier0_id=dict( type='str', ), tier0_display_name=dict( type='str', ), tier0_ls_id=dict( type='str', ), tier0_ls_display_name=dict( type='str', ), tier0_ls_interface_id=dict( type='str', ), tier0_ls_interface_display_name=dict( type='str', ), external_interface_path=dict( type='str' ) ) ), vip_subnets=dict( type='list', elements='dict', required=True, options=dict( ip_addresses=dict( type='list', required=True ), prefix_len=dict( type='int', rqeuired=True ) ) ), ) ) ) return tier1_ls_arg_spec @staticmethod def get_resource_base_url(parent_info): tier1_id = parent_info.get("tier1_id", 'default') return TIER_1_LOCALE_SERVICE_URL.format(tier1_id) def update_resource_params(self, nsx_resource_params): if "edge_cluster_info" in nsx_resource_params: edge_cluster_info = nsx_resource_params.pop( "edge_cluster_info") site_id = edge_cluster_info["site_id"] enforcementpoint_id = edge_cluster_info["enforcementpoint_id"] edge_cluster_base_url = ( EDGE_CLUSTER_URL.format(site_id, enforcementpoint_id)) edge_cluster_id = self.get_id_using_attr_name_else_fail( "edge_cluster", edge_cluster_info, edge_cluster_base_url, "Edge Cluster") nsx_resource_params["edge_cluster_path"] = ( edge_cluster_base_url + "/" + edge_cluster_id) if "preferred_edge_nodes_info" in nsx_resource_params: preferred_edge_nodes_info = nsx_resource_params.pop( "preferred_edge_nodes_info") nsx_resource_params["preferred_edge_paths"] = [] for preferred_edge_node_info in preferred_edge_nodes_info: site_id = preferred_edge_node_info.get( "site_id", "default") enforcementpoint_id = preferred_edge_node_info.get( "enforcementpoint_id", "default") edge_cluster_base_url = ( EDGE_CLUSTER_URL.format(site_id, enforcementpoint_id)) edge_cluster_id = self.get_id_using_attr_name_else_fail( "edge_cluster", preferred_edge_node_info, edge_cluster_base_url, 'Edge Cluster') edge_node_base_url = EDGE_NODE_URL.format( site_id, enforcementpoint_id, edge_cluster_id) edge_node_id = self.get_id_using_attr_name_else_fail( 
"edge_node", preferred_edge_node_info, edge_node_base_url, "Edge Node") nsx_resource_params["preferred_edge_paths"].append( edge_node_base_url + "/" + edge_node_id) if 'ha_vip_configs' in nsx_resource_params: for ha_vip_config in nsx_resource_params['ha_vip_configs']: external_interface_info = ha_vip_config.pop( 'external_interface_info') external_interface_paths = [] for external_interface in ( external_interface_info): external_interface_path = external_interface.get( 'external_interface_path') if not external_interface_path: tier0_id = self.get_id_using_attr_name_else_fail( 'tier0', external_interface, TIER_0_URL, "Tier 0") tier0_ls_id = ( self.get_id_using_attr_name_else_fail( 'tier0_ls', external_interface, TIER_0_LOCALE_SERVICE_URL, "Tier 0 Locale Service")) tier0_ls_inf_id = ( self.get_id_using_attr_name_else_fail( 'tier0_ls_interface', external_interface, TIER_0_LS_INTERFACE_URL, "Tier 0 Interface")) external_interface_path = ( TIER_0_LS_INTERFACE_URL.format( tier0_id, tier0_ls_id) + "/" + tier0_ls_inf_id) external_interface_paths.append( external_interface_path) ha_vip_config[ 'external_interface_paths'] = external_interface_paths def update_parent_info(self, parent_info): parent_info["ls_id"] = self.id class NSXTTier1Interface(NSXTBaseRealizableResource): def get_spec_identifier(self): return (NSXTTier1.NSXTTier1LocaleService.NSXTTier1Interface .get_spec_identifier()) @classmethod def get_spec_identifier(cls): return "interfaces" @staticmethod def get_resource_spec(): tier1_ls_int_arg_spec = {} tier1_ls_int_arg_spec.update( ipv6_ndra_profile_id=dict( required=False, type='str' ), mtu=dict( type='int' ), segment_id=dict( required=False, type='str' ), segment_display_name=dict( required=False, type='str' ), subnets=dict( required=True, type='list', elements='dict', options=dict( ip_addresses=dict( type='list', elements='str' ), prefix_len=dict( type='int' ) ) ), urpf_mode=dict( type='str', default='STRICT', choices=['NONE', 'STRICT'] ) ) return 
                tier1_ls_int_arg_spec

            @staticmethod
            def get_resource_base_url(parent_info):
                # Interfaces are nested under the Tier-1 gateway and its
                # locale service; both ids come from the parent chain.
                tier1_id = parent_info.get("tier1_id", 'default')
                locale_service_id = parent_info.get("ls_id", 'default')
                return TIER_1_LS_INTERFACE_URL.format(
                    tier1_id, locale_service_id)

            def update_resource_params(self, nsx_resource_params):
                # Resolve segment and (optional) IPv6 NDRA profile ids or
                # display names into policy paths expected by the API.
                # segment_id is a required attr
                segment_id = self.get_id_using_attr_name_else_fail(
                    "segment", nsx_resource_params, SEGMENT_URL, "Segment")
                nsx_resource_params["segment_path"] = (
                    SEGMENT_URL + "/" + segment_id)
                if self.do_resource_params_have_attr_with_id_or_display_name(
                        "ipv6_ndra_profile"):
                    ipv6_ndra_profile_id = (
                        self.get_id_using_attr_name_else_fail(
                            "ipv6_ndra_profile", nsx_resource_params,
                            IPV6_NDRA_PROFILE_URL, "Ipv6 NDRA Profile"))
                    nsx_resource_params["ipv6_profile_paths"] = [
                        IPV6_NDRA_PROFILE_URL + "/" + ipv6_ndra_profile_id]


if __name__ == '__main__':
    nsxt_tier1 = NSXTTier1()
    nsxt_tier1.realize()



================================================
FILE: plugins/modules/nsxt_principal_identities.py
================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_principal_identities short_description: 'Register a name-certificate combination.' description: "Associates a principal's name with a certificate that is used to authenticate. " version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str display_name: description: 'Identifier to use when displaying entity in logs or GUI' required: true type: str name: description: 'Name of the principal' required: true type: str node_id: description: 'Unique node-id' required: true type: str certificate_name: description: 'Display name of the certificate attached' required: true type: str role: description: 'Role' required: true type: str description: description: 'Description of this resource' required: false type: str resource_type: description: 'Must be set to the value PrincipalIdentity' required: false type: str id: description: 'Unique identifier of this resource' required: false type: str is_protected: description: 'Description of this resource' required: false type: bool tags: description: Opaque identifier meaninful to API user required: false type: Array of Tag state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." 
required: true ''' EXAMPLES = ''' - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Register a name-certificate combination nsxt_principal_identities: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "Akhilesh_principal_display_name" name: "Akhilesh_principal_name" node_id: "node-1" role: "enterprise_admin" certificate_name: "Akhilesh_cert" state: "present" ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request, get_certificate_string from ansible.module_utils._text import to_native def get_principal_identity_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_principal_identity_update_params(args=None): args_to_remove = ['name', 'node_id', 'certificate_pem', 'role', 'is_protected'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def update_params_with_pem_encoding(principal_id_params): ''' params: Parameters passed to the certificate result: Updated parameters. Files are replaced with the public and private strings. 
''' principal_id_params['certificate_pem'] = get_certificate_string (principal_id_params.pop('certificate_pem_file', None)) return principal_id_params def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, principal_id_params ): principal_id_params['certificate_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, '/trust-management/certificates', principal_id_params.pop('certificate_name', None)) return principal_id_params def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] module.fail_json(msg='No id exists with display name %s' % display_name) def get_principal_ids(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/trust-management/principal-identities', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing principal identities. 
Error [%s]' % (to_native(err))) return resp def get_principal_id_with_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): ''' result: returns the principal id of the display name provided ''' principal_ids = get_principal_ids(module, manager_url, mgr_username, mgr_password, validate_certs) if principal_ids and len(principal_ids['results'])>0: for principal_id in principal_ids['results']: if principal_id.__contains__('display_name') and principal_id['display_name'] == display_name: return principal_id return None def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, display_name, principal_id_params): ''' Checks if principal identity exists, if exists it means we need to update already existing principal identity after checking if there are any differences with respect to existing display name ''' existing_principal_id = get_principal_id_with_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name) if existing_principal_id is None: return False if not existing_principal_id.__contains__('description') and principal_id_params.__contains__('description'): return True if existing_principal_id.__contains__('description') and not principal_id_params.__contains__('description'): return True if existing_principal_id.__contains__('description') and principal_id_params.__contains__('description') and\ existing_principal_id['description'] != principal_id_params['description']: return True if existing_principal_id.__contains__('certificate_id') and principal_id_params.__contains__('certificate_id') and\ existing_principal_id['certificate_id'] != principal_id_params['certificate_id']: return True return False def get_certificate_id_with_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): ''' result: returns the certificate object with the display name provided ''' certificates = get_certificates(module, manager_url, mgr_username, 
mgr_password, validate_certs) if certificates and certificates['result_count']>0: for certificate in certificates['results']: if certificate.__contains__('display_name') and certificate['display_name'] == display_name: return certificate['id'] return None def main(): argument_spec = dict() argument_spec.update(hostname=dict(type='str', required=True), username=dict(type='str', required=True), password=dict(type='str', required=True, no_log=True), port=dict(type='int', default=443), validate_certs=dict(type='bool', requried=False, default=True), display_name=dict(required=True, type='str'), name=dict(required=True, type='str'), node_id=dict(required=True, type='str'), certificate_name=dict(required=False, type='str'), certificate_pem_file=dict(required=True, type='str', no_log=True), role=dict(required=False, type='str'), description=dict(required=False, type='str'), resource_type=dict(required=False, type='str'), id=dict(required=False, type='str'), is_protected=dict(required=False, type='bool'), tags=dict(required=False, type='list'), state=dict(required=True, choices=['present', 'absent'])) ''' Core function of the module reponsible for adding and deleting the certififcate. 
''' module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) principal_id_params = get_principal_identity_params(module.params.copy()) principal_id_params = update_params_with_pem_encoding(principal_id_params) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' if principal_id_params.__contains__('certificate_name'): principal_id_params = update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs, principal_id_params) principal_id_with_display_name = get_principal_id_with_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name) if state == 'present': # update the principal identity if check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, display_name, principal_id_params): if principal_id_with_display_name: principal_id_params['principal_identity_id'] = principal_id_with_display_name['id'] principal_id_params = get_principal_identity_update_params(principal_id_params.copy()) request_data = json.dumps(principal_id_params) try: (rc, resp) = request(manager_url+ '/trust-management/principal-identities?action=update_certificate', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update principal identity. Error[%s]. Request body [%s]." 
% (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, result=resp, message="Principal identity updated.") # add the principal identity if principal_id_with_display_name: module.exit_json(changed=False, msg="Principal id with display name \'%s\' already exists." % display_name) request_data = json.dumps(principal_id_params) try: (rc, resp) = request(manager_url+ '/trust-management/principal-identities/with-certificate', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add principal identity. Error[%s]. Request body [%s]." % (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, result=resp, message="Principal identity created.") elif state == 'absent': # delete the principal identity if not principal_id_with_display_name: module.fail_json(msg="Principal identity with display name \'%s\' doesn't exists." % display_name) principal_id = principal_id_with_display_name['id'] try: (rc, resp) = request(manager_url+ '/trust-management/principal-identities/' + principal_id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to delete principal identity with display name \'%s\'. Error[%s]." % (display_name, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=principal_id, message="Principal identity with display name \'%s\' and principal id \'%s\' deleted." %(display_name, principal_id)) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_principal_identities_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_principal_identities_facts short_description: List all existing principal identities description: Returns the list of principals registered with a certificate. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str ''' EXAMPLES = ''' - name: List all existing certificates nsxt_principal_identities_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/trust-management/principal-identities', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing transport zone. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_repo_sync.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_repo_sync short_description: 'Synchronizes the repository data between nsx managers' description: "Attempts to synchronize the repository partition on nsx manager. Repository partition contains packages required for the install and upgrade of nsx components.Normally there is no need to call this API explicitely by the user." version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str ''' EXAMPLES = ''' - name: Synchronizes the repository data between nsx managers nsxt_repo_sync: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' # Synchronize the repository data between nsx managers if module.check_mode: module.exit_json(changed=False, debug_out='The repository data between NSX' ' managers will be synchronized.', id=mgr_hostname) try: (rc, resp) = request(manager_url+ '/cluster/node?action=repo_sync', data='', headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Failed to synchronize repositories of NSX ' 'managers. Error[%s].' % to_native(err)) time.sleep(5) module.exit_json(changed=True, result=resp, message='NSX Manager repositories' ' synchronization started.') if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_repo_sync_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_repo_sync_facts short_description: 'Get synchronize status of a manager node' description: "Returns the synchronization status for the manager represented by given ." version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str node_name: description: 'Name of auto-deployment node' required: true type: str ''' EXAMPLES = ''' - name: Get repo sync status of an auto deployed node nsxt_repo_sync_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False node_name: "Manager-01" state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.common_utils import get_id_from_display_name_results from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() argument_spec.update(node_name=dict(required=True, type='str')) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_node_name = module.params['node_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) manager_node_id = get_id_from_display_name_results(module, manager_url, '/cluster/nodes/deployments', mgr_username, mgr_password, validate_certs, ['deployment_config','hostname'], ['vm_id'], manager_node_name) changed = False try: (rc, resp) = request(manager_url + '/cluster/nodes/%s/repo_sync/status' % manager_node_id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing manager node repo sync ' 'status. 
Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_rest.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2020, sky-joker # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: nsxt_rest short_description: Direct access to the NSX REST API description: - Provides direct access to the NSX REST API to execute the API. author: - sky-joker (@sky-joker) requirements: - "python >= 2.7" options: hostname: description: "Deployed NSX manager hostname." required: true type: str username: description: "The username to authenticate with the NSX manager." required: true type: str password: description: "The password to authenticate with the NSX manager." required: true type: str path: description: "URI being used to execute API calls." 
required: true type: str method: description: - "The HTTP method of the request." required: false choices: - get - post - put - patch - delete default: get type: str src: description: - "The absolute path to the file containing the request body(payload) to be sent to the NSX REST API." - "If this option isn't used, use the C(content) option instead." - "If this option is used, the C(content) option is ignored." required: false type: str content: description: - "The request body(payload) to be sent to the NSX REST API." - "If this option isn't used, use the C(src) option instead." required: false type: raw ''' EXAMPLES = ''' - name: create a new segment nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: patch path: /policy/api/v1/infra/segments/segment content: { "display_name": "segment", "subnets": [ { "gateway_address": "192.168.0.1/24" } ], } - name: get segment information nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: get path: /policy/api/v1/infra/segments/segment register: get_segment_information_result - name: delete a segment nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: delete path: /policy/api/v1/infra/segments/segment ''' RETURN = ''' body: description: dictionary of requested result information returned: always type: dict sample: { "_create_time": 1588405512111, "_create_user": "admin", "_last_modified_time": 1588405613884, "_last_modified_user": "admin", "_protection": "NOT_PROTECTED", "_revision": 1, "_system_owned": false, "admin_state": "UP", "display_name": "segment", "id": "segment", "marked_for_delete": false, "overridden": false, "parent_path": "/infra", "path": "/infra/segments/segment", "relative_path": "segment", "replication_mode": "MTEP", "resource_type": "Segment", "subnets": 
[ { "gateway_address": "192.168.0.1/24", "network": "192.168.0.0/24" } ], "type": "DISCONNECTED", "unique_id": "0361313c-20f0-42ba-aa77-e090277a50ac" } ''' import os import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec from ansible.module_utils.urls import basic_auth_header, fetch_url class VMwareNSXTRest(): def __init__(self, module): self.module = module self.mgr_hostname = module.params["hostname"] self.mgr_username = module.params["username"] self.mgr_password = module.params["password"] self.path = module.params["path"] self.method = module.params["method"] self.src = module.params["src"] self.content = module.params["content"] self.manager_url = "https://{}".format(self.mgr_hostname) self.headers = { "authorization": basic_auth_header(self.mgr_username, self.mgr_password), "Accept": "application/json", "Content-Type": "application/json" } if self.src: if os.path.isfile(self.src): try: with open(self.src, "r") as f: self.content = json.loads(f.read()) except Exception as err: self.module.fail_json(msg="src read error: %s" % err) else: self.module.fail_json(msg="cannot find/access src '%s'" % self.src) def error_code_check(self, info): status = info.get('status') if status >= 400: self.module.fail_json(msg="error_code: %s, error_message: %s" % (status, json.loads(info.get('body'))['error_message'])) if status == -1: self.module.fail_json(msg="error_code: %s, error_message: %s" % (status, info.get('msg'))) def operate_nsxt(self, method, ignore_errors=False): try: (resp, info) = fetch_url(self.module, self.manager_url + self.path, method=method.upper(), headers=self.headers, data=json.dumps(self.content)) except Exception as err: self.module.fail_json(msg="nsxt rest api request error: %s, error url: %s" % (err, self.manager_url + self.path)) if ignore_errors is False: self.error_code_check(info) resp_body = resp.read() if 'read' in dir(resp) 
else False if resp_body: return json.loads(resp_body) else: return "" def execute(self): if self.method == "get": resp = self.operate_nsxt(method=self.method) self.module.exit_json(changed=False, body=resp) if self.method == "post" or self.method == "put" or self.method == "patch": before_resp = self.operate_nsxt(method="get", ignore_errors=True) if before_resp: before_revision = before_resp["_revision"] else: before_revision = "" _ = self.operate_nsxt(method=self.method) after_resp = self.operate_nsxt(method="get") after_revision = after_resp["_revision"] if before_revision == after_revision: self.module.exit_json(changed=False, body=after_resp) else: self.module.exit_json(changed=True, body=after_resp) if self.method == "delete": resp = self.operate_nsxt(method="get", ignore_errors=True) if resp: resp = self.operate_nsxt(method=self.method) self.module.exit_json(changed=True, body=resp) else: self.module.exit_json(changed=False, body=resp) def main(): argument_spec = vmware_argument_spec() argument_spec.update(path=dict(type='str', required=True), method=dict(type='str', choices=['get', 'post', 'put', 'patch', 'delete'], default='get'), src=dict(type='str'), content=dict(type='raw'),) module = AnsibleModule(argument_spec, supports_check_mode=True) vmware_nsx_rest = VMwareNSXTRest(module) vmware_nsx_rest.execute() if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_route_advertise.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': 'xx', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_route_advertise short_description: 'Toggle tier 1 route advertisement' description: "Toggle route advertisement on Tier 1 routers" version_added: '2.7' author: 'Matt Proud' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str display_name: description: 'Display name of Tier 1 router' required: true type: str enabled: description: 'Flag to enable this configuration' type: boolean required: false advertise_static_routes: description: 'Flag to advertise all static routes' required: false type: boolean advertise_dns_forwarder: description: 'Flag to advertise all routes of dns forwarder listener ips and source ips' required: false type: boolean advertise_lb_snat_ip: description: 'Flag to advertise all lb SNAT ips' required: false type: boolean advertise_lb_vip: description: 'Flag to advertise lb vips' required: false type: boolean advertise_nat_routes: description: 'Flag to advertise all routes of nat' required: false type: boolean advertise_nsx_connected_routes: description: 'Flag to advertise all connected routes' required: false type: boolean ''' EXAMPLES = ''' - name: Toggle tier 1 route advertisement nsxt_route_advertise: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False enabled: True advertise_static_routes: True advertise_dns_forwarder: True advertise_lb_snat_ip: True advertise_lb_vip: True advertise_nat_routes: True advertise_nsx_connected_routes: True ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def get_advertise_params(args=None): args_to_remove = ['username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_logical_routers(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/logical-routers', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, 
validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing logical routers. Error [%s]' % (to_native(err))) return resp def get_lr_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): logical_routers = get_logical_routers(module, manager_url, mgr_username, mgr_password, validate_certs) for logical_router in logical_routers['results']: if logical_router.__contains__('display_name') and logical_router['display_name'] == display_name: return logical_router return None def get_revision(module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_id): try: (rc, resp) = request(manager_url+ '/logical-routers/%s/routing/advertisement' % logical_router_id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) return resp['_revision'] except Exception as err: module.fail_json(msg='Error accessing current advertisement. 
Error [%s]' % (to_native(err))) def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), enabled=dict(required=False, type='bool'), advertise_static_routes=dict(required=False, type='bool'), advertise_dns_forwarder=dict(required=False, type='bool'), advertise_lb_snat_ip=dict(required=False, type='bool'), advertise_lb_vip=dict(required=False, type='bool'), advertise_nat_routes=dict(required=False, type='bool'), advertise_nsx_connected_routes=dict(required=False, type='bool') ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) advertise_params = get_advertise_params(module.params.copy()) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) logical_router_dict = get_lr_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) logical_router_id = None if logical_router_dict: logical_router_id = logical_router_dict['id'] advertise_params['_revision'] = get_revision(module, manager_url, mgr_username, mgr_password, validate_certs, logical_router_id) # update current revision headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' # add the pool if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(advertise_params)), id='12345') request_data = json.dumps(advertise_params) try: (rc, resp) = request(manager_url+ '/logical-routers/%s/routing/advertisement' % logical_router_id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to toggle config. Request body [%s]. Error[%s]." 
% (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="Router advertisement set for display name %s." % module.params['display_name']) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_transport_node_collections.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_transport_node_collections short_description: Create transport node collection by attaching Transport Node Profile to cluster. description: "When transport node collection is created the hosts which are part of compute collection will be prepared automatically i.e. NSX Manager attempts to install the NSX components on hosts. Transport nodes for these hosts are created using the configuration specified in transport node profile." 
version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str cluster_name: description: CLuster Name required: false type: str compute_manager_name: description: Cluster Manager Name required: false type: str description: description: Description required: true type: str display_name: description: Display name required: true type: str resource_type: description: "Must be set to the value TransportNodeCollection" required: true type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true transport_node_profile_name: description: Transport Node Profile Names required: true type: str ''' EXAMPLES = ''' - name: Create transport node collection nsxt_transport_node_collections: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "TNC1" resource_type: "TransportNodeCollection" description: "Transport Node Collections 1" compute_manager_name: "VC1" cluster_name: "cl1" transport_node_profile_name: "TNP1" state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native import ssl import socket import hashlib def get_transport_node_collections_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def 
get_transport_node_collections(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/transport-node-collections', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing transport-node-collections. Error [%s]' % (to_native(err))) return resp def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name, exit_if_not_found=True): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] if exit_if_not_found: module.fail_json(msg='No id exist with display name %s' % display_name) def get_transport_node_collection_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): transport_node_collections = get_transport_node_collections(module, manager_url, mgr_username, mgr_password, validate_certs) for transport_node_collection in transport_node_collections['results']: if transport_node_collection.__contains__('display_name') and transport_node_collection['display_name'] == display_name: return transport_node_collection return None def wait_till_delete(id, module, manager_url, mgr_username, mgr_password, validate_certs): try: while True: (rc, resp) = request(manager_url+ '/transport-node-collections/%s'% id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) time.sleep(10) except Exception 
as err: time.sleep(5) return def get_transport_node_profile_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_profile_name): try: return get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/transport-node-profiles", transport_node_profile_name) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (transport_node_profile_name, to_native(err))) def get_compute_collection_id (module, manager_url, mgr_username, mgr_password, validate_certs, manager_name, cluster_name): try: (rc, resp) = request(manager_url+ '/fabric/compute-collections', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) compute_manager_id = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/fabric/compute-managers", manager_name) except Exception as err: module.fail_json(msg='Error accessing compute collection id for manager %s, cluster %s. 
Error [%s]' % (manager_name, cluster_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == cluster_name and \ result['origin_id'] == compute_manager_id: return result['external_id'] module.fail_json(msg='No compute collection id exist with cluster name %s for compute manager %s' % (cluster_name, manager_name)) def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collection_params ): compute_manager_name = transport_node_collection_params.pop('compute_manager_name', None) compute_cluster_name = transport_node_collection_params.pop('cluster_name', None) compute_collection_id = get_compute_collection_id (module, manager_url, mgr_username, mgr_password, validate_certs, compute_manager_name, compute_cluster_name) transport_node_collection_params['compute_collection_id'] = compute_collection_id transport_node_profile_name = transport_node_collection_params.pop('transport_node_profile_name', None) transport_node_profile_id = get_transport_node_profile_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_profile_name) transport_node_collection_params['transport_node_profile_id'] = transport_node_profile_id return transport_node_collection_params def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collection_with_ids): existing_tnc = get_transport_node_collection_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collection_with_ids['display_name']) if existing_tnc is None: return False if existing_tnc['compute_collection_id'] == transport_node_collection_with_ids['compute_collection_id'] and \ existing_tnc['transport_node_profile_id'] != \ transport_node_collection_with_ids['transport_node_profile_id']: return True if existing_tnc.__contains__('description') and not 
transport_node_collection_with_ids.__contains__('description'): return True if not existing_tnc.__contains__('description') and transport_node_collection_with_ids.__contains__('description'): return True if existing_tnc.__contains__('description') and transport_node_collection_with_ids.__contains__('description') and \ existing_tnc['description'] != transport_node_collection_with_ids['description']: return True if existing_tnc.__contains__('transport_node_profile_name') and \ transport_node_collection_with_ids.__contains__('transport_node_profile_name') \ and existing_tnc['transport_node_profile_name'] != \ transport_node_collection_with_ids['transport_node_profile_name']: return True if existing_tnc.__contains__('tags') and not transport_node_collection_with_ids.__contains__('tags'): return True if not existing_tnc.__contains__('tags') and transport_node_collection_with_ids.__contains__('tags'): return True if existing_tnc.__contains__('tags') and transport_node_collection_with_ids.__contains__('tags') and \ (not compareTags(existing_tnc, transport_node_collection_with_ids)): return True return False def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), description=dict(required=True, type='str'), resource_type=dict(required=True, type='str'), transport_node_profile_name=dict(required=True, type='str'), compute_manager_name=dict(required=False, type='str'), cluster_name=dict(required=False, type='str'), state=dict(required=True, choices=['present', 'absent']), tags=dict(required=False, type='list')) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) transport_node_collections_params = get_transport_node_collections_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = 
def main():
    """Entry point: create, update, or delete an NSX transport node collection.

    Exits via module.exit_json / module.fail_json in every path.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(display_name=dict(required=True, type='str'),
                         description=dict(required=True, type='str'),
                         resource_type=dict(required=True, type='str'),
                         transport_node_profile_name=dict(required=True, type='str'),
                         compute_manager_name=dict(required=False, type='str'),
                         cluster_name=dict(required=False, type='str'),
                         state=dict(required=True, choices=['present', 'absent']),
                         tags=dict(required=False, type='list'))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    # Work on a copy: the helper strips connection args and None values in place.
    transport_node_collections_params = get_transport_node_collections_params(module.params.copy())
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    display_name = module.params['display_name']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    # Look up any existing collection with this display name to decide create vs update.
    transport_node_collections_dict = get_transport_node_collection_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name)
    transport_node_collection_id, revision = None, None
    if transport_node_collections_dict:
        transport_node_collection_id = transport_node_collections_dict['id']
        revision = transport_node_collections_dict['_revision']
    if state == 'present':
        # Resolve display names in the payload into NSX ids (mutates the params dict).
        body = update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_collections_params)
        updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, body)
        headers = dict(Accept="application/json")
        headers['Content-Type'] = 'application/json'
        if not updated:
            # add the transport_node_collections
            request_data = json.dumps(transport_node_collections_params)
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(request_data), id='12345')
            try:
                # Idempotency: an existing, unchanged collection is reported as no-op.
                if transport_node_collection_id:
                    module.exit_json(changed=False, id=transport_node_collection_id,
                                     message="transport-node-collection with display_name %s already exist on cluster %s." % (module.params['display_name'], module.params['cluster_name']))
                (rc, resp) = request(manager_url + '/transport-node-collections', data=request_data,
                                     headers=headers, method='POST', url_username=mgr_username,
                                     url_password=mgr_password, validate_certs=validate_certs,
                                     ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to add transport_node_collections. Request body [%s]. Error[%s]." % (request_data, to_native(err)))
            module.exit_json(changed=True, id=resp["id"], body=str(resp),
                             message="transport-node-collection created for cluster %s." % module.params['cluster_name'])
        else:
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(json.dumps(transport_node_collections_params)), id=transport_node_collection_id)
            transport_node_collections_params['_revision'] = revision  # update current revision
            request_data = json.dumps(transport_node_collections_params)
            id = transport_node_collection_id
            try:
                (rc, resp) = request(manager_url + '/transport-node-collections/%s' % id, data=request_data,
                                     headers=headers, method='PUT', url_username=mgr_username,
                                     url_password=mgr_password, validate_certs=validate_certs,
                                     ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to update transport_node_collections with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err)))
            module.exit_json(changed=True, id=resp["id"], body=str(resp),
                             message="transport-node-collection with Compute collection fabric template id %s updated." % id)
    elif state == 'absent':
        # delete the array
        id = transport_node_collection_id
        if id is None:
            module.exit_json(changed=False, msg='No transport-node-collection exist with display_name %s' % display_name)
        if module.check_mode:
            module.exit_json(changed=True, debug_out=str(json.dumps(transport_node_collections_params)), id=id)
        try:
            (rc, resp) = request(manager_url + "/transport-node-collections/%s" % id, method='DELETE',
                                 url_username=mgr_username, url_password=mgr_password,
                                 validate_certs=validate_certs)
        except Exception as err:
            module.fail_json(msg="Failed to delete transport-node-collection with name %s. Error[%s]." % (display_name, to_native(err)))
        # Block until NSX confirms the collection is gone before reporting success.
        wait_till_delete(id, module, manager_url, mgr_username, mgr_password, validate_certs)
        module.exit_json(changed=True, id=id,
                         message="transport-node-collection with name %s deleted." % display_name)
% display_name) def compareTags(existing_tnc, new_tnc): return ordered(existing_tnc['tags']) == ordered(new_tnc['tags']) def ordered(obj): if isinstance(obj, dict): return sorted((k, ordered(v)) for k, v in obj.items()) if isinstance(obj, list): return sorted(ordered(x) for x in obj) else: return obj if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_transport_node_collections_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_transport_node_collections_facts short_description: List Transport Node collections description: Returns all Transport Node collections version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. 
required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str ''' EXAMPLES = ''' - name: List Transport Node collections nsxt_fabric_compute_managers_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils.urls import open_url, fetch_url from ansible.module_utils._text import to_native from ansible.module_utils.six.moves.urllib.error import HTTPError def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/transport-node-collections', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing transport-node-collections. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_transport_node_profiles.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_transport_node_profiles short_description: Create a Transport Node Profile description: "Transport node profile captures the configuration needed to create a transport node. A transport node profile can be attached to compute collections for automatic TN creation of member hosts." version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str description: description: Description of the pre/post-upgrade check required: false type: str display_name: description: Display name required: true type: str host_switch_spec: description: 'The HostSwitchSpec is the base class for standard and preconfigured host switch specifications. 
def get_transport_node_profile_params(args=None):
    """Strip Ansible connection/bookkeeping arguments and None-valued entries.

    Mutates *args* in place and returns it; the remainder is the payload sent
    to the transport-node-profiles API.
    """
    args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs']
    for key in args_to_remove:
        args.pop(key, None)
    # Drop parameters the user did not supply so they are not sent to the API.
    # (Idiom fix: identity test 'is None' instead of '== None'.)
    for key, value in args.copy().items():
        if value is None:
            args.pop(key, None)
    return args

def get_transport_node_profiles(module, manager_url, mgr_username, mgr_password, validate_certs):
    """GET the full list of transport node profiles; fail the module on transport errors."""
    try:
        (rc, resp) = request(manager_url + '/transport-node-profiles',
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing transport node profiles. Error [%s]' % (to_native(err)))
    return resp

def get_host_switch_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs,
                                         endpoint, display_name, exit_if_not_found=True):
    """Return the 'uuid' of the object at *endpoint* whose display_name matches.

    Used for fabric virtual switches, whose list results expose 'uuid' rather
    than 'id'. Fails the module when not found unless exit_if_not_found=False,
    in which case None is returned implicitly.
    """
    try:
        (rc, resp) = request(manager_url + endpoint, headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err)))
    for result in resp['results']:
        if 'display_name' in result and result['display_name'] == display_name:
            return result['uuid']
    if exit_if_not_found:
        module.fail_json(msg='No id exist with display name %s' % display_name)
Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] if exit_if_not_found: module.fail_json(msg='No id exist with display name %s' % display_name) def get_tnp_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): transport_node_profiles = get_transport_node_profiles(module, manager_url, mgr_username, mgr_password, validate_certs) for transport_node_profile in transport_node_profiles['results']: if transport_node_profile.__contains__('display_name') and transport_node_profile['display_name'] == display_name: return transport_node_profile return None def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_profile_params ): for host_switch in transport_node_profile_params['host_switch_spec']['host_switches']: if host_switch.__contains__('host_switch_type') and host_switch['host_switch_type'] in FABRIC_VIRTUAL_SWITCH_TYPE: if host_switch.__contains__('host_switch_name'): host_switch['host_switch_id'] = get_host_switch_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, '/fabric/virtual-switches', host_switch['host_switch_name']) else: module.fail_json(msg='Failing as host_switch_name is not provided for host switch of type: %s' % host_switch['host_switch_type']) host_switch_profiles = host_switch.pop('host_switch_profiles', None) host_switch_profile_ids = [] for host_switch_profile in host_switch_profiles: profile_obj = {} profile_obj['value'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/host-switch-profiles?include_system_owned=true", host_switch_profile['name']) profile_obj['key'] = host_switch_profile['type'] host_switch_profile_ids.append(profile_obj) host_switch['host_switch_profile_ids'] = host_switch_profile_ids ip_pool_id = None if 
host_switch.__contains__('ip_assignment_spec'): if host_switch['ip_assignment_spec'].__contains__('ip_pool_name'): ip_pool_name = host_switch['ip_assignment_spec'].pop('ip_pool_name', None) host_switch['ip_assignment_spec']['ip_pool_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/pools/ip-pools", ip_pool_name) if host_switch.__contains__('transport_zone_endpoints'): for transport_zone_endpoint in host_switch['transport_zone_endpoints']: transport_zone_name = transport_zone_endpoint.pop('transport_zone_name', None) transport_zone_endpoint['transport_zone_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/transport-zones", transport_zone_name) if host_switch.__contains__('vmk_install_migration'): for vmk_install_migration in host_switch['vmk_install_migration']: if vmk_install_migration.__contains__('destination_network_name'): destination_network_name = vmk_install_migration.pop('destination_network_name', None) vmk_install_migration['destination_network'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/logical-switches", destination_network_name) if transport_node_profile_params.__contains__('transport_zone_endpoints'): for transport_zone_endpoint in transport_node_profile_params['transport_zone_endpoints']: transport_zone_name = transport_zone_endpoint.pop('transport_zone_name', None) transport_zone_endpoint['transport_zone_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/transport-zones", transport_zone_name) transport_node_profile_params['display_name'] = transport_node_profile_params.pop('display_name', None) return transport_node_profile_params def id_exist_in_list_dict_obj(key, list_obj1, list_obj2): all_id_presents = False if len(list_obj1) != len(list_obj2): return all_id_presents for dict_obj1 in list_obj1: if dict_obj1.__contains__(key): for dict_obj2 in 
list_obj2: if dict_obj2.__contains__(key) and dict_obj1[key] == dict_obj2[key]: all_id_presents = True continue if not all_id_presents: return False return True def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_profile_with_ids): existing_transport_node_profile = get_tnp_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_profile_with_ids['display_name']) if existing_transport_node_profile is None: return False if existing_transport_node_profile.__contains__('transport_zone_endpoints') and transport_node_profile_with_ids.__contains__('transport_zone_endpoints'): return not id_exist_in_list_dict_obj('transport_zone_id', existing_transport_node_profile['transport_zone_endpoints'], transport_node_profile_with_ids['transport_zone_endpoints']) if existing_transport_node_profile.__contains__('host_switch_spec') and existing_transport_node_profile['host_switch_spec'].__contains__('host_switches') and \ transport_node_profile_with_ids.__contains__('host_switch_spec') and transport_node_profile_with_ids['host_switch_spec'].__contains__('host_switches') and \ existing_transport_node_profile['host_switch_spec']['host_switches'] != transport_node_profile_with_ids['host_switch_spec']['host_switches']: return True if existing_transport_node_profile.__contains__('tags') and not transport_node_profile_with_ids.__contains__('tags'): return True if not existing_transport_node_profile.__contains__('tags') and transport_node_profile_with_ids.__contains__('tags'): return True if existing_transport_node_profile.__contains__('tags') and transport_node_profile_with_ids.__contains__('tags') and (not compareTags(existing_transport_node_profile, transport_node_profile_with_ids)): return True return False def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), description=dict(required=False, type='str'), 
def main():
    """Entry point: create, update, or delete an NSX transport node profile.

    Exits via module.exit_json / module.fail_json in every path.

    Bug fix: the check-mode branch of the create path referenced the undefined
    name 'logical_switch_params' (a copy-paste from another module), which
    raised NameError whenever check mode hit a create; it now dumps 'body'.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(display_name=dict(required=True, type='str'),
                         description=dict(required=False, type='str'),
                         host_switch_spec=dict(required=False, type='dict',
                                               host_switches=dict(required=True, type='list'),
                                               resource_type=dict(required=True, type='str')),
                         resource_type=dict(required=True, type='str'),
                         transport_zone_endpoints=dict(required=False, type='list'),
                         state=dict(required=True, choices=['present', 'absent']),
                         tags=dict(required=False, type='list'))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    # Work on a copy: the helper strips connection args and None values in place.
    transport_node_profile_params = get_transport_node_profile_params(module.params.copy())
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    display_name = module.params['display_name']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    # Look up any existing profile with this display name to decide create vs update.
    transport_node_profile_dict = get_tnp_from_display_name(module, manager_url, mgr_username,
                                                            mgr_password, validate_certs, display_name)
    transport_node_profile_id, revision = None, None
    if transport_node_profile_dict:
        transport_node_profile_id = transport_node_profile_dict['id']
        revision = transport_node_profile_dict['_revision']
    if state == 'present':
        # Resolve display names in the payload into NSX ids (mutates the params dict).
        body = update_params_with_id(module, manager_url, mgr_username, mgr_password,
                                     validate_certs, transport_node_profile_params)
        updated = check_for_update(module, manager_url, mgr_username, mgr_password,
                                   validate_certs, body)
        headers = dict(Accept="application/json")
        headers['Content-Type'] = 'application/json'
        if not updated:
            # add the node
            if module.check_mode:
                # Fixed: was json.dumps(logical_switch_params), an undefined name.
                module.exit_json(changed=True, debug_out=str(json.dumps(body)), id='12345')
            request_data = json.dumps(body)
            try:
                if not transport_node_profile_id:
                    transport_node_profile_id = get_id_from_display_name(
                        module, manager_url, mgr_username, mgr_password, validate_certs,
                        '/transport-node-profiles', display_name, exit_if_not_found=False)
                # Idempotency: an existing, unchanged profile is reported as no-op.
                if transport_node_profile_id:
                    module.exit_json(changed=False, id=transport_node_profile_id,
                                     message="Transport node profile with display_name %s already exist." % module.params['display_name'])
                (rc, resp) = request(manager_url + '/transport-node-profiles', data=request_data,
                                     headers=headers, method='POST', url_username=mgr_username,
                                     url_password=mgr_password, validate_certs=validate_certs,
                                     ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to add transport node profile. Request body [%s]. Error[%s]." % (request_data, to_native(err)))
            time.sleep(5)
            module.exit_json(changed=True, id=resp["id"], body=str(resp),
                             message="transport node profile with display name %s created." % module.params['display_name'])
        else:
            if module.check_mode:
                module.exit_json(changed=True, debug_out=str(json.dumps(body)),
                                 id=transport_node_profile_id)
            body['_revision'] = revision  # update current revision
            request_data = json.dumps(body)
            profile_id = transport_node_profile_id  # renamed from 'id' (shadowed the builtin)
            try:
                (rc, resp) = request(manager_url + '/transport-node-profiles/%s' % profile_id,
                                     data=request_data, headers=headers, method='PUT',
                                     url_username=mgr_username, url_password=mgr_password,
                                     validate_certs=validate_certs, ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to update transport node profile with id %s. Request body [%s]. Error[%s]." % (profile_id, request_data, to_native(err)))
            time.sleep(5)
            module.exit_json(changed=True, id=resp["id"], body=str(resp),
                             message="transport node profile with node id %s updated." % profile_id)
    elif state == 'absent':
        # delete the array
        profile_id = transport_node_profile_id
        if profile_id is None:
            module.exit_json(changed=False, msg='No transport node profile exist with display name %s' % display_name)
        if module.check_mode:
            module.exit_json(changed=True, debug_out=str(json.dumps(transport_node_profile_params)), id=profile_id)
        try:
            (rc, resp) = request(manager_url + "/transport-node-profiles/%s" % profile_id,
                                 method='DELETE', url_username=mgr_username,
                                 url_password=mgr_password, validate_certs=validate_certs)
        except Exception as err:
            module.fail_json(msg="Failed to delete transport node profile with id %s. Error[%s]." % (profile_id, to_native(err)))
        time.sleep(5)
        module.exit_json(changed=True, object_name=profile_id,
                         message="transport node profile with node id %s deleted." % profile_id)
Error[%s]." % (id, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=id, message="transport node profile with node id %s deleted." % id) def compareTags(existing_tnp, new_tnp): return ordered(existing_tnp['tags']) == ordered(new_tnp['tags']) def ordered(obj): if isinstance(obj, dict): return sorted((k, ordered(v)) for k, v in obj.items()) if isinstance(obj, list): return sorted(ordered(x) for x in obj) else: return obj if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_transport_node_profiles_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_transport_node_profiles_facts short_description: List Transport Nodes Profiles description: Returns information about all transport node profiles. 
version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str ''' EXAMPLES = ''' - name: List Transport Node Profiles nsxt_transport_node_profiles_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/transport-node-profiles', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing transport node profiles. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_transport_nodes.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_transport_nodes short_description: Create a Transport Node description: "Transport nodes are hypervisor hosts and NSX Edges that will participate in an NSX-T overlay. For a hypervisor host, this means that it hosts VMs that will communicate over NSX-T logical switches. For NSX Edges, this means that it will have logical router uplinks and downlinks. This API creates transport node for a host node (hypervisor) or edge node (router) in the transport network. When you run this command for a host, NSX Manager attempts to install the NSX kernel modules, which are packaged as VIB, RPM, or DEB files. For the installation to succeed, you must provide the host login credentials and the host thumbprint. To get the ESXi host thumbprint, SSH to the host and run the openssl x509 -in /etc/vmware/ssl/rui.crt -fingerprint -sha256 -noout command. To generate host key thumbprint using SHA-256 algorithm please follow the steps below. 
Log into the host, making sure that the connection is not vulnerable to a man in the middle attack. Check whether a public key already exists. Host public key is generally located at '/etc/ssh/ssh_host_rsa_key.pub'. If the key is not present then generate a new key by running the following command and follow the instructions. ssh-keygen -t rsa Now generate a SHA256 hash of the key using the following command. Please make sure to pass the appropriate file name if the public key is stored with a different file name other than the default 'id_rsa.pub'. awk '{print $2}' id_rsa.pub | base64 -d | sha256sum -b | sed 's/ .*$//' | xxd -r -p | base64 This api is deprecated as part of FN+TN unification. Please use Transport Node API to install NSX components on a node. Additional documentation on creating a transport node can be found in the NSX-T Installation Guide. In order for the transport node to forward packets, the host_switch_spec property must be specified. Host switches (called bridges in OVS on KVM hypervisors) are the individual switches within the host virtual switch. Virtual machines are connected to the host switches. When creating a transport node, you need to specify if the host switches are already manually preconfigured on the node, or if NSX should create and manage the host switches. You specify this choice by the type of host switches you pass in the host_switch_spec property of the TransportNode request payload. For a KVM host, you can preconfigure the host switch, or you can have NSX Manager perform the configuration. For an ESXi host or NSX Edge node, NSX Manager always configures the host switch. To preconfigure the host switches on a KVM host, pass an array of PreconfiguredHostSwitchSpec objects that describes those host switches. In the current NSX-T release, only one prefonfigured host switch can be specified. See the PreconfiguredHostSwitchSpec schema definition for documentation on the properties that must be provided. 
Preconfigured host switches are only supported on KVM hosts, not on ESXi hosts or NSX Edge nodes. To allow NSX to manage the host switch configuration on KVM hosts, ESXi hosts, or NSX Edge nodes, pass an array of StandardHostSwitchSpec objects in the host_switch_spec property, and NSX will automatically create host switches with the properties you provide. In the current NSX-T release, up to 5 host switches can be automatically managed. See the StandardHostSwitchSpec schema definition for documentation on the properties that must be provided. Note: previous versions of NSX-T used a property named host_switches to specify the host switch configuration on the transport node. That property is deprecated, but still functions. You should configure new host switches using the host_switch_spec property. The request should either provide node_deployement_info or node_id. If the host node (hypervisor) or edge node (router) is already added in system then it can be converted to transport node by providing node_id in request. If host node (hypervisor) or edge node (router) is not already present in system then information should be provided under node_deployment_info." version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str display_name: description: Display name required: true type: str description: description: Description of this resource required: False type: str resource_type: description: Must be set to the value TransportNode required: False type: str host_switch_spec: description: 'This property is used to either create standard host switches or to inform NSX about preconfigured host switches that already exist on the transport node. 
Pass an array of either StandardHostSwitchSpec objects or PreconfiguredHostSwitchSpec objects. It is an error to pass an array containing different types of HostSwitchSpec objects.' host_switches: description: This property is deprecated in favor of 'host_switch_spec'. Property 'host_switches' can only be used for NSX managed transport nodes. 'host_switch_spec' can be used for both NSX managed or manually preconfigured host switches. required: true type: array of PreconfiguredHostSwitch required: false resource_type: description: Selects the type of the transport zone profile required: true type: str type: dict maintenance_mode: description: The property is read-only, used for querying result. User could update transport node maintenance mode by UpdateTransportNodeMaintenanceMode call. required: false type: str node_deployment_info: display_name: description: Identifier to use when displaying entity in logs or GUI This field is deprecated. TransportNode field 'display_name' must be used instead. For HostNode, this field defaults to ID if not set. For EdgeNode and PublicCloudGatewayNode, this field is ignored if specified in request payload. required: false type: string allocation_list: description: List of logical router ids to which this edge node is allocated. required: false type: list deployment_config: description: 'When this configuration is specified, edge fabric node of deployment_type VIRTUAL_MACHINE will be deployed and registered with MP.' form_factor: description: Supported edge form factor. required: false type: str node_user_settings: audit_password: description: "Password for the node audit user. For deployment, this property is required. After deployment, this property is ignored, and the node cli must be used to change the password. The password specified must be at least 12 characters in length and must contain at least one lowercase, one uppercase, one numeric character and one special character (except quotes)." 
required: false type: str audit_username: description: "The default username is 'audit'. To configure username, you must provide this property together with audit_password." required: false type: str cli_password: description: "Password for the node cli user. For deployment, this property is required. After deployment, this property is ignored, and the node cli must be used to change the password. The password specified must be at least 12 characters in length and must contain at least one lowercase, one uppercase, one numeric character and one special character (except quotes)." required: false type: str cli_username: description: "To configure username, you must provide this property together with cli_password." required: false type: str description: "Username and password settings for the node. Note - these settings will be honored only during node deployment. Post deployment, CLI must be used for changing the user settings, changes to these parameters will not have any effect." required: true root_password: description: "Password for the node root user. For deployment, this property is required. After deployment, this property is ignored, and the node cli must be used to change the password. The password specified must be at least 12 characters in length and must contain at least one lowercase, one uppercase, one numeric character and one special character (except quotes)." required: false type: str type: dict required: false type: dict vm_deployment_config: ipv4_assignment_enabled: description: 'Its a boolean flag, if assigned as false then Edge TN would be created using Static Ipv6 only. This field is deprecated.' required: false type: boolean ipv6_assignment_type: description: 'IPv6 assignment type e.g STATIC, DHCPV6, SLAAC. if enum value is STATIC then management_port_subnets is mandatory. In this iteration DHCPV6 and SLAAC are not supported.' 
required: false type: str compute: description: 'The cluster node VM will be deployed on the specified cluster or resourcepool for specified VC server. If vc_username and vc_password are present then this field takes name else id.' required: true type: str data_networks: description: "List of distributed portgroup or VLAN logical identifiers or names to which the datapath serving vnics of edge node vm will be connected. If vc_username and vc_password are present then this field takes names else id." required: true type: list ignore_ssl_connection: description: 'This is a boolean value which will work as a flag to control whether SSL should be used while connecting to VC or not. If this is True, SSL will not be used to connect to VC. If the value is False, SSL will be used to connect to the VC. If this parameter is not specified, then it will be True.' required: false type: boolean default_gateway_addresses: description: 'The default gateway for the VM to be deployed must be specified if all the other VMs it communicates with are not in the same subnet. Do not specify this field and management_port_subnets to use DHCP. Note: only single IPv4 default gateway address is supported and it must belong to management network. IMPORTANT: VMs deployed using DHCP are currently not supported, so this parameter should be specified.' required: false type: list description: VM Deployment Configuration host: description: "Name of the host where edge VM is to be deployed if vc_username and vc_password are present then this field takes host name else host id." required: false type: str management_network: description: 'Distributed portgroup identifier to which the management vnic of cluster node VM will be connected. If vc_username and vc_password are present then this field takes name else id.' required: true type: str management_port_subnets: description: 'IP Address and subnet configuration for the management port. 
Do not specify this field and default_gateway_addresses to use DHCP. Note: only one IPv4 address is supported for the management port. IMPORTANT: VMs deployed using DHCP are currently not supported, so this parameter should be specified.' required: false type: array of IPSubnet placement_type: description: "Specifies the config for the platform through which to deploy the VM" required: true type: str required: true storage: description: Moref or name of the datastore in VC. If it is to be taken from 'Agent VM Settings', then it should be empty If vc_username and vc_password are present then this field takes name else id. required: true type: str type: dict vc_name: description: 'The VC-specific names will be resolved on this VC, so all other identifiers specified in the config must belong to this vCenter server.' required: true type: str vc_username: description: 'Username of VC' required: false type: str vc_password: description: 'VC Password' required: false type: str reservation_info: description: 'Resource reservation for memory and CPU resources' required: false type: dict cpu_reservation: description: 'Guaranteed minimum allocation of CPU resources' required: false type: dict reservation_in_mhz: description: 'GCPU resevation in mhz' required: false type: int reservation_in_shares: description: 'CPU reservation in shares' required: false type: str memory_reservation: description: 'Guaranteed minimum allocation of memory resources' required: false type: dict reservation_percentage: description: 'Memory reservation percentage' required: false type: int resource_allocation: description: 'Resource reservation settings' required: false type: dict cpu_count: description: 'CPU count' required: false type: int memory_allocation_in_mb: description: 'Memory allocation in MB' required: false type: int node_settings: description: "Current configuration on edge node Reports the current configuration of the SSH, DHS, NTP and host name on this edge node. 
The deployment_config property is used during deployment and this counterpart property shows current values." required: false type: dict allow_ssh_root_login: description: "Allowing root SSH logins is not recommended for security reasons. Edit of this property is not supported when updating transport node. Use the CLI to change this property." required: false type: boolean dns_servers: description: "List of DNS servers." required: false type: list enable_ssh: description: "Enabling SSH service is not recommended for security reasons." required: false type: boolean hostname: description: "Enabling SSH service is not recommended for security reasons." required: true type: string advanced_configuration: description: "Array of additional specific properties for advanced or cloud- specific deployments in key-value format." required: false type: list ntp_servers: description: "List of NTP servers." required: false type: list search_domains: description: "List of domain names that are used to complete unqualified host names." required: false type: list syslog_servers: description: "List of Syslog server configuration." required: false type: list deployment_type: description: Specifies whether the service VM should be deployed on each host such that it provides partner service locally on the host, or whether the service VMs can be deployed as a cluster. If deployment_type is CLUSTERED, then the clustered_deployment_count should be provided. required: false type: str description: None discovered_ip_addresses: description: Discovered IP Addresses of the fabric node, version 4 or 6 required: false type: list discovered_node_id: description: Id of discovered node which was converted to create this node required: false type: str external_id: description: Current external id of this virtual machine in the system. 
required: false type: str fqdn: description: Domain name the entity binds to required: false type: str host_credential: description: Login credentials for the host password: description: Password for the user (optionally specified on PUT, unspecified on GET) required: false type: str required: false thumbprint: description: Hexadecimal SHA256 hash of the vIDM server's X.509 certificate required: false type: str type: dict username: description: Username value of the log required: false type: str ip_addresses: description: Interface IP addresses required: false type: array of IPv4Address managed_by_server: description: The id of the vCenter server managing the ESXi type HostNode required: false type: str os_type: description: OS type of the discovered node required: true type: str os_version: description: OS version of the discovered node required: false type: str required: false resource_type: description: Selects the type of the transport zone profile required: true type: str type: dict remote_tunnel_endpoint: description: Configuration for a remote tunnel endpoin required: False type: 'dict' host_switch_name: description: The host switch name to be used for the remote tunnel endpoint required: True type: 'str' named_teaming_policy: description: The named teaming policy to be used by the remote tunnel endpoint required: False type: 'str' rtep_vlan: description: VLAN id for remote tunnel endpoint required: True type: 'dict' VlanID: description: Virtual Local Area Network Identifier required: False type: 'int' ip_assignment_spec: description: Specification for IPs to be used with host switch remote tunnel endpoints required: True type: 'dict' resource_type: description: Resource type required: True type: 'str' ip_pool_id: description: IP pool id required: False type: 'str' ip_list: description: List of IPs for transport node host switch virtual tunnel endpoints required: False type: 'list' ip_mac_list: description: List of IPs and MACs for transport node host switch 
virtual tunnel endpoints required: False type: 'list' default_gateway: description: Default gateway required: False type: 'dict' IPAddress: description: IPv4 or IPv6 address required: False type: 'str' subnet_mask: description: Subnet mask required: False type: 'dict' IPAddress: description: IPv4 IPv6 address required: False type: 'str' tags: description: Opaque identifiers meaningful to the API user required: False type: array of Tag state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true ''' EXAMPLES = ''' - name: Create transport node nsxt_transport_nodes: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False resource_type: "TransportNode" display_name: "NSX Configured TN" description: "NSX configured Test Transport Node" host_switch_spec: resource_type: "StandardHostSwitchSpec" host_switches: - host_switch_profiles: - name: "uplinkProfile1" type: "UplinkHostSwitchProfile" host_switch_name: "hostswitch1" pnics: - device_name: "vmnic1" uplink_name: "uplink-1" ip_assignment_spec: resource_type: "StaticIpPoolSpec" ip_pool_name: "IPPool-IPV4-1" transport_zone_endpoints: - transport_zone_name: "TZ1" node_deployment_info: resource_type: "HostNode" display_name: "Host_1" ip_addresses: ["10.149.55.21"] os_type: "ESXI" os_version: "6.5.0" host_credential: username: "root" password: "ca$hc0w" thumbprint: "e7fd7dd84267da10f991812ca62b2bedea3a4a62965396a04728da1e7f8e1cb9" state: "present" - name: Create edge transport nodes vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{mgr_0_ip_address}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{display_name}}" description: "{{description}}" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: - host_switch_profiles: - name: "{{uplink_profile_name}}" type: UplinkHostSwitchProfile 
host_switch_name: "{{host_switch_name}}" host_switch_mode: "{{host_switch_mode}}" pnics: - device_name: "{{device_name}}" uplink_name: "{{uplink_name}}" ip_assignment_spec: resource_type: StaticIpPoolSpec ip_pool_name: "{{ip_pool_display_name}}" transport_zone_endpoints: - transport_zone_name: "{{transport_zone_display_name_1}}" - transport_zone_name: "{{transport_zone_display_name_2}}" node_deployment_info: resource_type: EdgeNode display_name: "{{node_display_name}}" deployment_type: VIRTUAL_MACHINE fqdn: "{{ip_address}}" ip_addresses: - "{{ip_address}}" node_settings: allow_ssh_root_login: "{{allow_ssh_root_login}}" enable_ssh: "{{enable_ssh}}" dns_servers: - "{{dns_server}}" ntp_servers: - "{{ntp_server}}" hostname: "{{node_display_name}}" search_domains: - "{{search_domains}}" tags: - tag: "{{edge_tag}}" scope: "{{edge_scope}}" deployment_config: form_factor: "{{form_factor}}" node_user_settings: cli_password: "{{password}}" root_password: "{{password}}" vm_deployment_config: placement_type: VsphereDeploymentConfig vc_name: "{{prod_vc_display_name}}" vc_username: "{{prod_vc_username}}" vc_password: "{{prod_vc_password}}" host: "{{prod_esx_ip}}" compute: "{{prod_vc_cluster}}" storage: "{{prod_vc_datastore}}" management_network: "{{prod_vc_portgroup}}" data_networks: - "{{prod_vc_portgroup}}" - "{{prod_vc_portgroup}}" - "{{prod_vc_portgroup}}" management_port_subnets: - ip_addresses: - "{{ip_address}}" prefix_length: "{{prefix_length}}" default_gateway_addresses: - "{{gateway}}" reservation_info: cpu_reservation: reservation_in_mhz: "{{reservation_in_mhz}}" reservation_in_shares: "{{reservation_in_shares}}" memory_reservation: reservation_percentage: "{{reservation_percentage}}" resource_allocation: cpu_count: "{{cpu_count}}" memory_allocation_in_mb: "{{memory_allocation_in_mb}}" tags: - tag: "{{edge_tn_tag}}" scope: "{{edge_tn_scope}}" state: "{{state}}" ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from 
ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request, get_vc_ip_from_display_name from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vcenter_utils import get_resource_id_from_name, get_data_network_id_from_name from ansible.module_utils._text import to_native import socket import hashlib import ssl import ipaddress FAILED_STATES = ["failed"] IN_PROGRESS_STATES = ["pending", "in_progress"] SUCCESS_STATES = ["partial_success", "success", "NODE_READY"] FABRIC_VIRTUAL_SWITCH_TYPE = ["VDS"] def get_transport_node_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_transport_nodes(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/transport-nodes', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing transport nodes. Error [%s]' % (to_native(err))) return resp def get_discovered_nodes(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url + '/fabric/discovered-nodes', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing discovered nodes. 
Error [%s]' % (to_native(err))) return resp def get_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name, exit_if_not_found=True): try: (rc, resp) = request(manager_url+ endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['id'] if exit_if_not_found: module.fail_json(msg='No id exist with display name %s' % display_name) def get_tn_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): transport_nodes = get_transport_nodes(module, manager_url, mgr_username, mgr_password, validate_certs) for transport_node in transport_nodes['results']: if transport_node.__contains__('display_name') and transport_node['display_name'] == display_name: return transport_node return None def get_dn_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): discovered_nodes = get_discovered_nodes(module, manager_url, mgr_username, mgr_password, validate_certs) for discovered_node in discovered_nodes['results']: if discovered_node.__contains__('display_name') and discovered_node['display_name'] == display_name: return discovered_node return None def get_host_switch_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, endpoint, display_name, exit_if_not_found=True): try: (rc, resp) = request(manager_url + endpoint, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing id for display name %s. 
Error [%s]' % (display_name, to_native(err))) for result in resp['results']: if result.__contains__('display_name') and result['display_name'] == display_name: return result['uuid'] if exit_if_not_found: module.fail_json(msg='No id exist with display name %s' % display_name) def wait_till_create(node_id, module, manager_url, mgr_username, mgr_password, validate_certs): try: count = 0; while True: time.sleep(10) count = count + 1 (rc, resp) = request(manager_url+ '/transport-nodes/%s/state'% node_id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if any(resp['state'] in progress_status for progress_status in IN_PROGRESS_STATES) and \ any(resp['node_deployment_state']['state'] in progress_status for progress_status in IN_PROGRESS_STATES): if count == 360: #Wait for max 60 minutes for host to realize module.fail_json(msg= 'Error creating transport node: creation state %s, node_deployment_state %s, Failure message: %s'%(str(resp['state']), str(resp['node_deployment_state']['state']), str(resp['failure_message']))) elif any(resp['state'] in progress_status for progress_status in SUCCESS_STATES) and\ any(resp['node_deployment_state']['state'] in progress_status for progress_status in SUCCESS_STATES): time.sleep(5) return elif any(resp['state'] in progress_status for progress_status in FAILED_STATES) or\ any(resp['node_deployment_state']['state'] in progress_status for progress_status in FAILED_STATES): module.fail_json(msg= 'Error creating transport node: creation state %s, node_deployment_state %s'%(str(resp['state']), str(resp['node_deployment_state']['state']))) else: if count == 360: module.fail_json(msg= 'Error creating transport node: creation state %s, node_deployment_state %s'%(str(resp['state']), str(resp['node_deployment_state']['state']))) except Exception as err: module.fail_json(msg='Error accessing transport node. 
Error [%s]' % (to_native(err))) def wait_till_delete(vm_id, module, manager_url, mgr_username, mgr_password, validate_certs): try: while True: (rc, resp) = request(manager_url+ '/transport-nodes/%s/state'% vm_id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) time.sleep(10) except Exception as err: time.sleep(5) return def cmp_dict(dict1, dict2): # dict1 contain dict2 #print dict2 for k2, v2 in dict2.items(): found = False if k2 not in dict1: continue if type(v2) != list and dict1[k2] != dict2[k2]: return False for obj2 in v2: for obj1 in dict1[k2]: if all(item in obj1.items() for item in obj2.items()): found = True if not found: return False return True def update_params_with_id (module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_params ): if transport_node_params.__contains__('host_switch_spec'): for host_switch in transport_node_params['host_switch_spec']['host_switches']: if host_switch.__contains__('host_switch_type') and host_switch[ 'host_switch_type'] in FABRIC_VIRTUAL_SWITCH_TYPE: if host_switch.__contains__('host_switch_name'): host_switch['host_switch_id'] = get_host_switch_id_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, '/fabric/virtual-switches', host_switch['host_switch_name']) else: module.fail_json( msg='Failing as host_switch_name is not provided for host switch of type: %s' % host_switch[ 'host_switch_type']) host_switch_profiles = host_switch.pop('host_switch_profiles', None) host_switch_profile_ids = [] if host_switch_profiles is not None: for host_switch_profile in host_switch_profiles: profile_obj = {} profile_obj['value'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/host-switch-profiles?include_system_owned=true", host_switch_profile['name']) profile_obj['key'] = host_switch_profile['type'] 
host_switch_profile_ids.append(profile_obj) host_switch['host_switch_profile_ids'] = host_switch_profile_ids ip_pool_id = None if host_switch.__contains__('ip_assignment_spec') and host_switch['ip_assignment_spec']['resource_type'] == 'StaticIpPoolSpec': ip_pool_name = host_switch['ip_assignment_spec'].pop('ip_pool_name', None) host_switch['ip_assignment_spec']['ip_pool_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/pools/ip-pools", ip_pool_name) if host_switch.__contains__('transport_zone_endpoints'): for transport_zone_endpoint in host_switch['transport_zone_endpoints']: transport_zone_name = transport_zone_endpoint.pop('transport_zone_name', None) transport_zone_endpoint['transport_zone_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/transport-zones", transport_zone_name) if host_switch.__contains__('vmk_install_migration'): for network in host_switch['vmk_install_migration']: if network.__contains__('destination_network'): network['destination_network'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/logical-switches", network['destination_network']) if transport_node_params.__contains__('transport_zone_endpoints'): for transport_zone_endpoint in transport_node_params['transport_zone_endpoints']: transport_zone_name = transport_zone_endpoint.pop('transport_zone_name', None) transport_zone_endpoint['transport_zone_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/transport-zones", transport_zone_name) if transport_node_params.__contains__('node_deployment_info') and transport_node_params['node_deployment_info'].__contains__('resource_type') and transport_node_params['node_deployment_info']['resource_type'] == 'EdgeNode': vc_name = transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('vc_name', None) 
transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['vc_id'] = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/fabric/compute-managers", vc_name) transport_node_params['display_name'] = transport_node_params.pop('display_name', None) return transport_node_params def id_exist_in_list_dict_obj(key, list_obj1, list_obj2): all_id_presents = False if len(list_obj1) != len(list_obj2): return all_id_presents for dict_obj1 in list_obj1: if dict_obj1.__contains__(key): for dict_obj2 in list_obj2: if dict_obj2.__contains__(key) and dict_obj1[key] == dict_obj2[key]: all_id_presents = True continue if not all_id_presents: return False return True def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_with_ids): existing_transport_node = get_tn_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_with_ids['display_name']) if existing_transport_node is None: return False if not existing_transport_node.__contains__('description') and transport_node_with_ids.__contains__('description'): return True if existing_transport_node.__contains__('description') and transport_node_with_ids.__contains__('description') and existing_transport_node['description'] != transport_node_with_ids['description']: return True if existing_transport_node.__contains__('description') and not transport_node_with_ids.__contains__('description'): return True if existing_transport_node.__contains__('tags') and not transport_node_with_ids.__contains__('tags'): return True if not existing_transport_node.__contains__('tags') and transport_node_with_ids.__contains__('tags'): return True if existing_transport_node.__contains__('tags') and transport_node_with_ids.__contains__('tags') and (not compareTags(existing_transport_node, transport_node_with_ids)): return True if transport_node_with_ids.__contains__('host_switch_spec') and 
transport_node_with_ids['host_switch_spec'].__contains__('host_switches'): existing_host_switches = existing_transport_node['host_switch_spec']['host_switches'] sorted_existing_host_switches = sorted(existing_host_switches, key = lambda i: i['host_switch_name']) sorted_new_host_switches = sorted(transport_node_with_ids['host_switch_spec']['host_switches'], key = lambda i: i['host_switch_name']) if len(sorted_existing_host_switches) != len(sorted_new_host_switches): return True for i in range(len(sorted_existing_host_switches)): diff_obj = {k: sorted_existing_host_switches[i][k] for k in sorted_existing_host_switches[i] if k in sorted_new_host_switches[i] and sorted_existing_host_switches[i][k] != sorted_new_host_switches[i][k]} if not cmp_dict(diff_obj, sorted_new_host_switches[i]): return True return False def get_api_cert_thumbprint(ip_address, module): ip = ipaddress.ip_address(ip_address) if isinstance(ip, ipaddress.IPv4Address): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) elif isinstance(ip, ipaddress.IPv6Address): sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) sock.settimeout(1) wrappedSocket = ssl.wrap_socket(sock) try: wrappedSocket.connect((ip_address, 443)) except Exception as err: module.fail_json(msg='Failed to get node ID from ESXi host with IP {}. Error: {}'.format(ip_address, err)) else: der_cert_bin = wrappedSocket.getpeercert(True) thumb_sha256 = hashlib.sha256(der_cert_bin).hexdigest() return thumb_sha256 finally: wrappedSocket.close() def inject_vcenter_info(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_params): ''' params: - transport_node_params: These are the transport node parameters passed from playbook file result: - takes the vecenter parameters accepted by playbook and converts it into the form accepted by transport node api using pyvmomi functions. 
''' vm_deployment_config = transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'] if vm_deployment_config.__contains__('ignore_ssl_verification'): ignore_ssl_verification = vm_deployment_config['ignore_ssl_verification'] else: ignore_ssl_verification = True if vm_deployment_config.__contains__('vc_username') and vm_deployment_config.__contains__('vc_password'): vc_name = vm_deployment_config['vc_name'] vc_ip = get_vc_ip_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, "/fabric/compute-managers", vc_name) vc_username = transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('vc_username', None) vc_password = transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('vc_password', None) if vm_deployment_config.__contains__('host'): host = vm_deployment_config.pop('host', None) host_id = get_resource_id_from_name(module, vc_ip, vc_username, vc_password, 'host', host, ignore_ssl_verification) transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['host_id'] = str(host_id) storage = vm_deployment_config.pop('storage') storage_id = get_resource_id_from_name(module, vc_ip, vc_username, vc_password, 'storage', storage, ignore_ssl_verification) transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['storage_id'] = str(storage_id) cluster = vm_deployment_config.pop('compute') cluster_id = get_resource_id_from_name(module, vc_ip, vc_username, vc_password, 'cluster', cluster, ignore_ssl_verification) transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['compute_id'] = str(cluster_id) management_network = vm_deployment_config.pop('management_network') management_network_id = get_resource_id_from_name(module, vc_ip, vc_username, vc_password, 'network', management_network, ignore_ssl_verification) 
transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['management_network_id'] = str(management_network_id) data_networks = vm_deployment_config.pop('data_networks') data_network_ids = get_data_network_id_from_name(module, vc_ip, vc_username, vc_password, data_networks, ignore_ssl_verification) transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['data_network_ids'] = data_network_ids if vm_deployment_config.__contains__('host'): transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('host', None) transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('cluster', None) transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('storage', None) transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('management_network', None) transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('data_networks', None) else: if vm_deployment_config.__contains__('host'): host_id = transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('host', None) transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['host_id'] = host_id cluster_id = transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('compute', None) storage_id = transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('storage', None) management_network_id = transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('management_network', None) data_network_ids = transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config'].pop('data_networks', None) 
transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['compute_id'] = cluster_id transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['storage_id'] = storage_id transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['management_network_id'] = management_network_id transport_node_params['node_deployment_info']['deployment_config']['vm_deployment_config']['data_network_ids'] = data_network_ids transport_node_params['node_deployment_info']['vm_deployment_config'].pop('ignore_ssl_verification', None) def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), ignore_ssl_verification=dict(required=False, type='boolean'), description=dict(required=False, type='str'), host_switch_spec=dict(required=False, type='dict', host_switches=dict(required=True, type='list'), resource_type=dict(required=True, type='str')), node_deployment_info=dict(required=False, type='dict', discovered_node_id=dict(required=False, type='str'), deployment_config=dict(required=False, type='dict', node_user_settings=dict(required=True, type='dict', cli_username=dict(required=False, type='str'), audit_username=dict(required=False, type='str'), root_password=dict(required=False, type='str', no_log=True), cli_password=dict(required=False, type='str', no_log=True), audit_password=dict(required=False, type='str', no_log=True)), vm_deployment_config=dict(required=True, type='dict', data_networks=dict(required=True, type='list'), management_network=dict(required=True, type='str'), vc_username=dict(required=False, type='str'), vc_password=dict(required=False, type='str', no_log=True), placement_type=dict(required=True, type='str'), compute=dict(required=True, type='str'), vc_name=dict(required=True, type='str'), ipv4_assignment_enabled=dict(required=False, type='boolean'), ipv6_assignment_type=dict(required=False, type='str'), 
storage=dict(required=True, type='str'), default_gateway_addresses=dict(required=False, type='list'), management_port_subnets=dict(required=False, type='list'), host=dict(required=False, type='str'), reservation_info=dict(required=False, type='dict', cpu_reservation=dict(required=False, type='dict', reservation_in_mhz=dict(required=False, type='int'), reservation_in_shares=dict(required=False, type='str')), memory_reservation=dict(required=False, type='dict', reservation_percentage=dict(required=False, type='int'))), resource_allocation=dict(required=False, type='dict', cpu_count=dict(required=False, type='int'), memory_allocation_in_mb=dict(required=False, type='int'))), form_factor=dict(required=False, type='str')), discovered_ip_addresses=dict(required=False, type='list'), ip_addresses=dict(required=False, type='list'), node_settings=dict(required=False, type='dict', advanced_configuration=dict(required=False, type='str'), allow_ssh_root_login=dict(required=False, type='boolean'), dns_servers=dict(required=False, type='str'), enable_ssh=dict(required=False, type='boolean'), hostname=dict(required=True, type='str'), ntp_servers=dict(required=False, type='list'), search_domains=dict(required=False, type='list'), syslog_servers=dict(required=False, type='list')), fqdn=dict(required=False, type='str'), os_version=dict(required=False, type='str'), managed_by_server=dict(required=False, type='str'), host_credential=dict(required=False, type='dict', username=dict(required=False, type='str'), password=dict(required=False, type='str', no_log=True), thumbprint=dict(required=False, type='str')), allocation_list=dict(required=False, type='list'), os_type=dict(required=True, type='str'), external_id=dict(required=False, type='str'), resource_type=dict(required=True, type='str'), deployment_type=dict(required=False, type='str')), maintenance_mode=dict(required=False, type='str'), remote_tunnel_endpoint=dict(required=False, type='dict', host_switch_name=dict(required=True, 
type='str'), named_teaming_policy=dict(required=False, type='str'), rtep_vlan=dict(required=True, type='dict', VlanID=dict(required=False, type='int')), ip_assignment_spec=dict(required=True, type='dict', resource_type=dict(required=True, type='str'), ip_pool_id=dict(required=False, type='str'), ip_list=dict(required=False, type='list'), ip_mac_list=dict(required=False, type='list'), default_gateway=dict(required=False, type='dict', IPAddress=dict(required=False, type='str')), subnet_mask=dict(required=False, type='dict', IPAddress=dict(required=False, type='str')))), tags=dict(required=False, type='list'), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) transport_node_params = get_transport_node_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) discovered_node_dict = get_dn_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name) discovered_node_id, revision, node_deployment_revision = None, None, None if discovered_node_dict: discovered_node_id = discovered_node_dict['external_id'] transport_node_dict = get_tn_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) transport_node_id, revision, node_deployment_revision = None, None, None if transport_node_dict: transport_node_id = transport_node_dict['id'] revision = transport_node_dict['_revision'] if state == 'present': if transport_node_params.__contains__('node_deployment_info') and transport_node_params['node_deployment_info']['resource_type'] == 'EdgeNode': inject_vcenter_info(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_params) 
body = update_params_with_id(module, manager_url, mgr_username, mgr_password, validate_certs, transport_node_params) updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, body) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' if not updated: # add the node if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(logical_switch_params)), id='12345') if body["node_deployment_info"].__contains__('host_credential'): if body["node_deployment_info"]["host_credential"].__contains__("thumbprint"): thumbprint = body["node_deployment_info"]["host_credential"]["thumbprint"] else: if not body["node_deployment_info"].__contains__("ip_addresses"): module.fail_json(msg="ESXi ip adresses are not provided") esxi_ip_address = body["node_deployment_info"]["ip_addresses"][0] thumbprint = get_api_cert_thumbprint(esxi_ip_address, module) body["node_deployment_info"]["host_credential"]["thumbprint"] = thumbprint request_data = json.dumps(body) try: if not transport_node_id: transport_node_id = get_id_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, '/transport-nodes', display_name, exit_if_not_found=False) if transport_node_id: module.exit_json(changed=False, id=transport_node_id, message="Transport node with display_name %s already exist."% module.params['display_name']) if discovered_node_id: (rc, resp) = request(manager_url + '/fabric/discovered-nodes/%s?action=create_transport_node' %discovered_node_id, data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) else: (rc, resp) = request(manager_url+ '/transport-nodes', data=request_data, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add transport node. 
Request body [%s]. Error[%s]." % (request_data, to_native(err))) wait_till_create(resp['node_id'], module, manager_url, mgr_username, mgr_password, validate_certs) time.sleep(5) module.exit_json(changed=True, id=resp["node_id"], body= str(resp), message="Transport node with display name %s created." % module.params['display_name']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(body)), id=transport_node_id) body['_revision'] = revision # update current revision #update node id with tn id - as result of FN TN unification body['node_id'] = transport_node_id request_data = json.dumps(body) id = transport_node_id try: (rc, resp) = request(manager_url+ '/transport-nodes/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update transport node with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["node_id"], body= str(resp), message="Transport node with node id %s updated." % id) elif state == 'absent': # delete the array id = transport_node_id if id is None: module.exit_json(changed=False, msg='No transport node exist with display name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(transport_node_params)), id=id) try: (rc, resp) = request(manager_url + "/transport-nodes/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete transport node with id %s. Error[%s]." % (id, to_native(err))) wait_till_delete(id, module, manager_url, mgr_username, mgr_password, validate_certs) time.sleep(5) module.exit_json(changed=True, object_name=id, message="Transport node with node id %s deleted." 
% id) def compareTags(existing_transport_node, new_transport_nodes): return ordered(existing_transport_node['tags']) == ordered(new_transport_nodes['tags']) def ordered(obj): if isinstance(obj, dict): return sorted((k, ordered(v)) for k, v in obj.items()) if isinstance(obj, list): return sorted(ordered(x) for x in obj) else: return obj if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_transport_nodes_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_transport_nodes_facts short_description: List Transport Nodes description: Returns information about all transport nodes along with underlying host or edge details. A transport node is a host or edge that contains hostswitches. A hostswitch can have virtual machines connected to them. 
             Because each transport node has hostswitches, transport nodes can also have
             virtual tunnel endpoints, which means that they can be part of the overlay.
version_added: "2.7"
author: Rahul Raghuvanshi
options:
    hostname:
        description: Deployed NSX manager hostname.
        required: true
        type: str
    username:
        description: The username to authenticate with the NSX manager.
        required: true
        type: str
    password:
        description: The password to authenticate with the NSX manager.
        required: true
        type: str
'''

EXAMPLES = '''
- name: List Transport Nodes
  nsxt_transport_nodes_facts:
    hostname: "10.192.167.137"
    username: "admin"
    password: "Admin!23Admin"
    validate_certs: False
'''

RETURN = '''# '''

import json
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request
from ansible.module_utils._text import to_native


def main():
    # Read-only facts module: issues a single GET against /transport-nodes
    # and returns the raw API response to the caller.
    argument_spec = vmware_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    changed = False  # facts modules never change state
    try:
        (rc, resp) = request(manager_url+ '/transport-nodes',
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        # NOTE(review): the message says "transport zone" but this endpoint
        # lists transport nodes - kept byte-identical (doc-only change).
        module.fail_json(msg='Error accessing transport zone. Error [%s]' % (to_native(err)))
    # Spread the API response (results, result_count, ...) into the module output.
    module.exit_json(changed=changed, **resp)

if __name__ == '__main__':
    main()


================================================
FILE: plugins/modules/nsxt_transport_zones.py
================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_transport_zones short_description: Create a Transport Zone description: "Creates a new transport zone. The required parameters are host_switch_name and transport_type (OVERLAY or VLAN). The optional parameters are description and display_name. This api is now deprecated. Please use new api - PUT /infra/sites//enforcement-points//transport-zones/" version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
required: true type: str description: description: Description of this resource required: false display_name: description: Identifier to use when displaying entity in logs or GUI required: true type: str is_default: description: Only one transport zone can be the default one for a given transport zone type. APIs that need transport zone can choose to use the default transport zone if a transport zone is not given by the user. required: false type: boolean nested_nsx: description: The flag only need to be set in nested NSX environment. required: false type: boolean resource_type: description: Should be set to the value PolicyTransportZone required: false state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." required: true tz_type: description: Valid values are OVERLAY_BACKED , VLAN_BACKED required: true type: str tags: description: Opaque identifier meaningful to API user required: false type: Array of Tag transport_zone_profile_ids: description: Identifiers of the transport zone profiles associated with this TransportZone. required: false type: array of TransportZoneProfileTypeIdEntry uplink_teaming_policy_names: description: The names of switching uplink teaming policies that all transport nodes in this transport zone must support. An exception will be thrown if a transport node within the transport zone does not support a named teaming policy. The user will need to first ensure all trasnport nodes support the desired named teaming policy before assigning it to the transport zone. If the field is not specified, the host switch's default teaming policy will be used. required: false type: list enforcementpoint_id: description: The EnforcementPoint ID where the TZ is located. Required if transport_zone_id is specified. default: default type: str site_id: description: The site ID where the EnforcementPoint is located. 
Required if transport_zone_id is specified. default: default type: str ''' EXAMPLES = ''' - name: Create transport zone nsxt_transport_zones: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False resource_type: "PolicyTransportZone" display_name: "TZ1" description: "NSX configured Test Transport Zone" tz_type: "VLAN_BACKED" state: "present" ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import TRANSPORT_ZONE_URL from ansible.module_utils._text import to_native def get_transport_zone_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_transport_zone_baseURL(transport_zone_params): if transport_zone_params.__contains__('display_name'): site_id = transport_zone_params.pop("site_id", 'default') enforcementpoint_id = transport_zone_params.pop("enforcementpoint_id", 'default') transport_zone_base_url = (TRANSPORT_ZONE_URL.format(site_id, enforcementpoint_id)) return transport_zone_base_url def get_transport_zones(module, manager_url, mgr_username, mgr_password, validate_certs, transport_zone_base_url): try: (rc, resp) = request(manager_url + transport_zone_base_url, method='GET', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing transport zones. 
Error [%s]' % (to_native(err))) return resp def get_tz_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name,transport_zone_base_url): transport_zones = get_transport_zones(module, manager_url, mgr_username, mgr_password, validate_certs, transport_zone_base_url) for transport_zone in transport_zones['results']: if transport_zone.__contains__('display_name') and transport_zone['display_name'] == display_name: return transport_zone return None def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, transport_zone_params, transport_zone_base_url): existing_transport_zone = get_tz_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, transport_zone_params['display_name'], transport_zone_base_url) if existing_transport_zone is None: return False if existing_transport_zone.__contains__('is_default') and transport_zone_params.__contains__('is_default') and \ existing_transport_zone['is_default'] != transport_zone_params['is_default']: return True if not existing_transport_zone.__contains__('description') and transport_zone_params.__contains__('description'): return True if existing_transport_zone.__contains__('description') and not transport_zone_params.__contains__('description'): return True if existing_transport_zone.__contains__('description') and transport_zone_params.__contains__('description') and \ existing_transport_zone['description'] != transport_zone_params['description']: return True if not existing_transport_zone.__contains__('uplink_teaming_policy_names') and transport_zone_params.__contains__( 'uplink_teaming_policy_names'): return True if existing_transport_zone.__contains__('uplink_teaming_policy_names') and not transport_zone_params.__contains__( 'uplink_teaming_policy_names'): return True if existing_transport_zone.__contains__('uplink_teaming_policy_names') and transport_zone_params.__contains__( 'uplink_teaming_policy_names') and \ 
existing_transport_zone['uplink_teaming_policy_names'] != transport_zone_params[ 'uplink_teaming_policy_names']: return True return False def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), tz_type=dict(required=True, choice=['VLAN_BACKED', 'OVERLAY_BACKED']), nested_nsx=dict(required=False, type='bool'), uplink_teaming_policy_names=dict(required=False, type='list'), transport_zone_profile_paths=dict(required=False, type='list'), is_default=dict(required=False, type='bool'), resource_type=dict(required=False, type='str'), description=dict(required=False, type='str'), tags=dict(required=False, type='list'), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) transport_zone_params = get_transport_zone_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/policy/api/v1'.format(mgr_hostname) transport_zone_base_url = get_transport_zone_baseURL(transport_zone_params) zone_dict = get_tz_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name, transport_zone_base_url) zone_id, revision = None, None if zone_dict: zone_id = zone_dict['id'] revision = zone_dict['_revision'] if state == 'present': headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, transport_zone_params, transport_zone_base_url) if not updated: # add the node if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(transport_zone_params)), id='12345') request_data = json.dumps(transport_zone_params) try: if zone_id: 
module.exit_json(changed=False, id=zone_id, message="Transport zone with display_name %s already exist." % module.params[ 'display_name']) (rc, resp) = request(manager_url + transport_zone_base_url + '/%s' % module.params['display_name'], data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json( msg="Failed to add transport zone. Request body [%s]. Error[%s]." % (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body=str(resp), message="Transport zone with display name %s created. " % (module.params['display_name'])) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(transport_zone_params)), id=zone_id) transport_zone_params['_revision'] = revision # update current revision request_data = json.dumps(transport_zone_params) id = zone_id try: (rc, resp) = request(manager_url + transport_zone_base_url + '/%s' % id, data=request_data, headers=headers, method='PATCH', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update transport zone with id %s. Request body [%s]. Error[%s]." % ( id, request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body=str(resp), message="Transport zone with zone id %s updated." 
% id) elif state == 'absent': # delete the array id = zone_id if id is None: module.exit_json(changed=False, msg='No transport zone exist with display name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(transport_zone_params)), id=id) try: (rc, resp) = request(manager_url + transport_zone_base_url + "/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete transport zone with id %s. Error[%s]." % (id, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=id, message="Transport zone with zone id %s deleted." % id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_transport_zones_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: nsxt_transport_zones_facts
short_description: List Transport Zones
description: Returns information about configured transport zones. NSX requires at least one
             transport zone. NSX uses transport zones to provide connectivity based on the
             topology of the underlying network, trust zones, or organizational separations.
             For example, you might have hypervisors that use one network for management
             traffic and a different network for VM traffic. This architecture would require
             two transport zones. The combination of transport zones plus transport connectors
             enables NSX to form tunnels between hypervisors. Transport zones define which
             interfaces on the hypervisors can communicate with which other interfaces on
             other hypervisors to establish overlay tunnels or provide connectivity to a VLAN.
             A logical switch can be in one (and only one) transport zone. This means that all
             of a switch's interfaces must be in the same transport zone. However, each
             hypervisor virtual switch (OVS or VDS) has multiple interfaces (connectors), and
             each connector can be attached to a different logical switch. For example, on a
             single hypervisor with two connectors, connector A can be attached to logical
             switch 1 in transport zone A, while connector B is attached to logical switch 2
             in transport zone B. In this way, a single hypervisor can participate in multiple
             transport zones. The API for creating a transport zone requires that a single
             host switch be specified for each transport zone, and multiple transport zones
             can share the same host switch.
version_added: "2.7"
author: Rahul Raghuvanshi
options:
    hostname:
        description: Deployed NSX manager hostname.
        required: true
        type: str
    username:
        description: The username to authenticate with the NSX manager.
        required: true
        type: str
    password:
        description: The password to authenticate with the NSX manager.
        required: true
        type: str
'''

EXAMPLES = '''
- name: List Transport Zones
  nsxt_transport_zones_facts:
    hostname: "10.192.167.137"
    username: "admin"
    password: "Admin!23Admin"
    validate_certs: False
'''

RETURN = '''# '''

import json
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request
from ansible.module_utils._text import to_native


def main():
    # Read-only facts module: issues a single GET against the manager API's
    # /transport-zones endpoint and returns the raw response.
    argument_spec = vmware_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    changed = False  # facts modules never change state
    try:
        (rc, resp) = request(manager_url+ '/transport-zones',
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing transport zone. Error [%s]' % (to_native(err)))
    # Spread the API response (results, result_count, ...) into the module output.
    module.exit_json(changed=changed, **resp)

if __name__ == '__main__':
    main()


================================================
FILE: plugins/modules/nsxt_upgrade_eula_accept.py
================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: nsxt_upgrade_eula_accept
short_description: 'Accept end user license agreement'
description: "Accept end user license agreement "
version_added: '2.7'
author: 'Kommireddy Akhilesh'
options:
    hostname:
        description: 'Deployed NSX manager hostname.'
        required: true
        type: str
    username:
        description: 'The username to authenticate with the NSX manager.'
        required: true
        type: str
    password:
        description: 'The password to authenticate with the NSX manager.'
        required: true
        type: str
'''

EXAMPLES = '''
- name: Accepts end user license agreement.
  nsxt_upgrade_eula_accept:
    hostname: "10.192.167.137"
    username: "admin"
    password: "Admin!23Admin"
    validate_certs: False
'''

RETURN = '''# '''

import json, time
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request
from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.common_utils import get_upgrade_orchestrator_node
from ansible.module_utils._text import to_native


def main():
    # Accepts the NSX upgrade EULA by POSTing to /upgrade/eula/accept on the
    # upgrade orchestrator node.
    argument_spec = vmware_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    headers = dict(Accept="application/json")
    headers['Content-Type'] = 'application/json'
    # The EULA must be accepted on the orchestrator node, which may differ
    # from the manager host the user supplied - resolve it first.
    mgr_hostname = get_upgrade_orchestrator_node(module, mgr_hostname, mgr_username,
                                                 mgr_password, headers, validate_certs)
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    # Accept the upgrade EULA
    if module.check_mode:
        module.exit_json(changed=False, debug_out='Upgrade EULA will be'
                         ' accepted.', id=mgr_hostname)
    try:
        (rc, resp) = request(manager_url+ '/upgrade/eula/accept',
                             data='', headers=headers, method='POST',
                             url_username=mgr_username, url_password=mgr_password,
                             validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Failed to accept end user license'
                         ' agreement. Error[%s].' % to_native(err))
    time.sleep(5)
    module.exit_json(changed=True, result=resp,
                     message='End user license agreement'
                     ' is accepted.')

if __name__ == '__main__':
    main()


================================================
FILE: plugins/modules/nsxt_upgrade_eula_accept_facts.py
================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_eula_accept_facts short_description: 'Gets EULA acceptance status and contents' description: "Returns EULA acceptance status and the contents." version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str required_info: choices: - acceptance - contents description: "required_info can be either 'acceptance' or 'contents'. 'acceptance' returns the acceptance status of end user license agreement . 'contents' Return the content of end user license agreement in the specified format. By default, it's pure string without line break. 
def main():
    """Fetch the upgrade EULA acceptance status or the EULA contents.

    required_info selects which of the two read-only EULA endpoints is
    queried; the raw API response is returned as module facts.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(required_info=dict(required=True, type='str',
                                            choices=['acceptance', 'contents']))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    required_info = module.params['required_info']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    # Map each supported required_info value to its API endpoint and the
    # word used in the failure message. This replaces two near-identical
    # request branches.
    endpoints = {
        'acceptance': ('/upgrade/eula/acceptance', 'acceptance'),
        'contents': ('/upgrade/eula/content', 'contents'),
    }
    if required_info not in endpoints:
        # Defensive only: AnsibleModule already enforces the choices above,
        # so this branch is unreachable in practice.
        module.fail_json(msg='Invalid value passed for required_info.')
    endpoint, info_desc = endpoints[required_info]
    try:
        (rc, resp) = request(manager_url + endpoint,
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username,
                             url_password=mgr_password,
                             validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing upgrade EULA %s '
                             'status. Error [%s]' % (info_desc,
                                                     to_native(err)))
    # Facts module: never reports a change.
    module.exit_json(changed=False, **resp)
def update_group_parameters(module, manager_url, mgr_username, mgr_password,
                            validate_certs, upgrade_group_parameters):
    """Resolve host_name entries in upgrade_units to upgrade-unit ids.

    Playbooks may identify upgrade units by host_name; the NSX API expects
    ids, so each host_name is looked up via the upgrade-units endpoint and
    replaced with the matching id. Returns the (mutated) parameters dict.
    """
    # Idiomatic membership test instead of calling __contains__ directly.
    if 'upgrade_units' in upgrade_group_parameters:
        for upgrade_unit in upgrade_group_parameters['upgrade_units']:
            host_name = upgrade_unit.pop('host_name', None)
            upgrade_unit_id = get_id_from_display_name_results(
                module, manager_url, '/upgrade/upgrade-units', mgr_username,
                mgr_password, validate_certs, ['display_name'], ['id'],
                host_name)
            upgrade_unit['id'] = upgrade_unit_id
    return upgrade_group_parameters

def main():
    """Create, update (state=present) or delete (state=absent) an NSX
    upgrade unit group identified by display_name."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(description=dict(type='str', required=False),
                         display_name=dict(type='str', required=True),
                         enabled=dict(type='bool', required=False,
                                      default=True),
                         extended_configuration=dict(type='list',
                                                     required=False),
                         parallel=dict(type='bool', required=False,
                                       default=True),
                         resource_type=dict(type='str', required=False),
                         tags=dict(type='list', required=False),
                         type=dict(type='str', required=True),
                         upgrade_unit_count=dict(type='int', required=False),
                         upgrade_units=dict(type='list', required=False),
                         state=dict(required=True,
                                    choices=['present', 'absent']))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # Strip connection-only parameters before building the request body.
    upgrade_group_params = clean_and_get_params(module.params.copy())
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    headers = dict(Accept="application/json")
    headers['Content-Type'] = 'application/json'
    # Group operations must go through the upgrade orchestrator node.
    mgr_hostname = get_upgrade_orchestrator_node(module, mgr_hostname,
                                                 mgr_username, mgr_password,
                                                 headers, validate_certs)
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    upgrade_group_params = update_group_parameters(module, manager_url,
                                                   mgr_username, mgr_password,
                                                   validate_certs,
                                                   upgrade_group_params)
    # Look up an existing group by display_name; None means no such group.
    upgrade_unit_group_id = get_id_from_display_name_results(
        module, manager_url, '/upgrade/upgrade-unit-groups', mgr_username,
        mgr_password, validate_certs, ['display_name'], ['id'],
        upgrade_group_params['display_name'], False)
    if state == 'present':
        # Create a new upgrade group or modify the existing one.
        if module.check_mode:
            module.exit_json(changed=False,
                             debug_out='A new upgrade unit will be created with'
                                       ' name: %s'
                                       % module.params['display_name'])
        request_data = json.dumps(upgrade_group_params)
        if upgrade_unit_group_id is None:
            try:
                (rc, resp) = request(manager_url +
                                     '/upgrade/upgrade-unit-groups',
                                     data=request_data, headers=headers,
                                     method='POST',
                                     url_username=mgr_username,
                                     url_password=mgr_password,
                                     validate_certs=validate_certs,
                                     ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to add upgrade group. Error[%s]."
                                 % to_native(err))
            time.sleep(5)
            module.exit_json(changed=True,
                             message="Upgrade group is added successfully.")
        else:
            try:
                (rc, resp) = request(manager_url + '/upgrade/upgrade-unit-'
                                     'groups/%s' % upgrade_unit_group_id,
                                     data=request_data, headers=headers,
                                     method='PUT',
                                     url_username=mgr_username,
                                     url_password=mgr_password,
                                     validate_certs=validate_certs,
                                     ignore_errors=True)
            except Exception as err:
                module.fail_json(msg="Failed to modify upgrade group. "
                                     "Error[%s]." % to_native(err))
            time.sleep(5)
            module.exit_json(changed=True,
                             message='Upgrade group with group id '
                                     '%s is updated.' % upgrade_unit_group_id)
    elif state == 'absent':
        # Remove an existing upgrade group.
        try:
            (rc, resp) = request(manager_url + '/upgrade/upgrade-unit-groups'
                                 '/%s' % upgrade_unit_group_id, data='',
                                 headers=headers, method='DELETE',
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
        except Exception as err:
            module.fail_json(msg='Failed while deleting the upgrade'
                                 ' group. Error[%s].' % to_native(err))
        time.sleep(5)
        module.exit_json(changed=True,
                         message='Upgrade group with group id '
                                 '%s is deleted.' % upgrade_unit_group_id)
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_group_facts short_description: 'Get the upgrade groups information' description: 'Get the upgrade groups information' version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
def main():
    """Return aggregate information about all NSX upgrade unit groups."""
    module = AnsibleModule(argument_spec=vmware_argument_spec(),
                           supports_check_mode=True)
    params = module.params
    nsx_url = 'https://{}/api/v1'.format(params['hostname'])
    try:
        rc, resp = request(nsx_url +
                           '/upgrade/upgrade-unit-groups/aggregate-info',
                           headers=dict(Accept='application/json'),
                           url_username=params['username'],
                           url_password=params['password'],
                           validate_certs=params['validate_certs'],
                           ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error while retrieving upgrade group '
                             'information. Error [%s]' % (to_native(err)))
    # Facts-only module: never reports a change.
    module.exit_json(changed=False, **resp)
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_history short_description: 'Get upgrade history' description: "Get upgrade history" version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
def main():
    """Retrieve the NSX upgrade history and return it as module facts."""
    argument_spec = vmware_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    # Facts module: never reports a change.
    changed = False
    try:
        (rc, resp) = request(manager_url + '/upgrade/history',
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username,
                             url_password=mgr_password,
                             validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        # Fixed copy-pasted message: this module retrieves upgrade history,
        # not bundle information.
        module.fail_json(msg='Error while retrieving upgrade history. '
                             'Error [%s]' % (to_native(err)))
    module.exit_json(changed=changed, **resp)
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_plan short_description: 'Upgrade plan settings for the component' description: 'Upgrade plan settings for the component' version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str component_type: description: 'Component whose upgrade plan is to be changed' choices: - host - edge - mp required: true type: str parallel: description: 'Upgrade Method to specify whether the upgrade is to be performed serially or in parallel' required: true type: boolean pause_after_each_group: description: 'Flag to indicate whether to pause the upgrade after upgrade of each group is completed' required: true type: boolean pause_on_error: description: 'Flag to indicate whether to pause the upgrade plan execution when an error occurs' required: true type: boolean state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." 
def main():
    """Update (state=present) or reset to defaults (state=absent) the
    upgrade plan settings of one component (host/edge/mp)."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(component_type=dict(type='str', required=True,
                                             choices=['host', 'edge', 'mp']),
                         parallel=dict(type='bool', required=False),
                         pause_after_each_group=dict(type='bool',
                                                     required=False),
                         pause_on_error=dict(type='bool', required=False),
                         state=dict(required=True,
                                    choices=['present', 'absent']))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # component_type goes into the URL, not the request body, so strip it.
    upgrade_plan_params = clean_and_get_params(module.params.copy(),
                                               ['component_type'])
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    component_type = module.params['component_type']
    headers = dict(Accept="application/json")
    headers['Content-Type'] = 'application/json'
    # Plan changes must be made on the upgrade orchestrator node.
    mgr_hostname = get_upgrade_orchestrator_node(module, mgr_hostname,
                                                 mgr_username, mgr_password,
                                                 headers, validate_certs)
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    if state == 'present':
        # Update the default upgrade plan.
        if module.check_mode:
            module.exit_json(changed=False,
                             debug_out='Upgrade Plan will be modified.'
                             ' parallel: %s, pause_after_each_group: %s, '
                             'pause_on_error: %s'
                             % (module.params['parallel'],
                                module.params['pause_after_each_group'],
                                module.params['pause_on_error']),
                             id=module.params['component_type'])
        request_data = json.dumps(upgrade_plan_params)
        try:
            (rc, resp) = request(manager_url + '/upgrade/plan/%s/settings'
                                 % component_type.upper(),
                                 data=request_data, headers=headers,
                                 method='PUT', url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
        except Exception as err:
            module.fail_json(msg="Failed to update upgrade plan. Error[%s]."
                             % to_native(err))
        time.sleep(5)
        module.exit_json(changed=True, message="Upgrade plan is updated.")
    elif state == 'absent':
        # Reset to the default upgrade plan.
        if module.check_mode:
            # Bug fix: check mode previously fell through and performed
            # the reset despite supports_check_mode=True.
            module.exit_json(changed=False,
                             debug_out='Upgrade plan will be reset.',
                             id=module.params['component_type'])
        try:
            (rc, resp) = request(manager_url + '/upgrade/plan?action=reset&'
                                 'component_type=%s' % component_type.upper(),
                                 data='', headers=headers, method='POST',
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
        except Exception as err:
            # Typo fix in message: "reseting" -> "resetting".
            module.fail_json(msg="Failed while resetting the upgrade plan. "
                                 "Error[%s]." % to_native(err))
        time.sleep(5)
        module.exit_json(changed=True, message="Upgrade plan is reset.")
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_plan_facts short_description: 'Get the upgrade plan settings for the component.' description: "Get the upgrade plan settings for the component." version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
def main():
    """Return the upgrade plan settings for the given component type."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(component_type=dict(required=True, type='str',
                                             choices=['host', 'edge', 'mp']))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    # component_type is required=True, so AnsibleModule has already rejected
    # a missing value; the former manual None check was unreachable.
    component_type = module.params['component_type']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    changed = False
    try:
        (rc, resp) = request(manager_url + '/upgrade/plan/%s/settings'
                             % component_type.upper(),
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username,
                             url_password=mgr_password,
                             validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        # Fixed copy-pasted message: this module retrieves upgrade plan
        # settings, not bundle information.
        module.fail_json(msg='Error while retrieving upgrade plan settings. '
                             'Error [%s]' % (to_native(err)))
    module.exit_json(changed=changed, **resp)
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_postchecks short_description: 'Execute post-upgrade checks' description: "Run pre-defined checks to identify potential issues which can be encountered after an upgrade. The results of the checks are added to the respective upgrade units aggregate-info. The progress and status of operation is part of upgrade status summary of individual components." version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str component_type: choices: - host - mp - edge description: "Component type on which post upgrade is to be run." 
def wait_for_post_upgrade_checks_to_execute(module, manager_url, endpoint,
                                            mgr_username, mgr_password,
                                            validate_certs, component_type,
                                            time_out=10800):
    '''
    Poll until post-upgrade checks for a component have finished.

    params:
    - endpoint: API endpoint to poll (upgrade-unit-groups aggregate info).
    - component_type: component ('mp', 'host' or 'edge') whose groups are
      monitored.
    - time_out: maximum number of seconds to wait.

    Returns None once no started group of the given component type with any
    upgrade units still reports an unfinished post-upgrade status; raises
    Exception('Operation timed out.') if time_out elapses first.
    '''
    operation_time = 0
    while True:
        try:
            (rc, resp) = request(manager_url + endpoint,
                                 headers=dict(Accept='application/json'),
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
        except Exception as err:
            module.fail_json(msg="Failed while polling for post upgrade checks to complete. Error[%s]."
                             % to_native(err))
        # Idiomatic membership test instead of calling __contains__.
        if 'results' in resp:
            all_done = True
            for result in resp['results']:
                # A group still counts as "running" only if it matches the
                # component type, actually has units, and has been started.
                if result['post_upgrade_status']['status'] != 'COMPLETED' and \
                        result['type'] == component_type.upper() and \
                        result['upgrade_unit_count'] > 0 and \
                        result['status'] != 'NOT_STARTED':
                    all_done = False
            if all_done:
                return None
        time.sleep(15)
        operation_time = operation_time + 15
        if operation_time > time_out:
            raise Exception('Operation timed out.')

def main():
    """Trigger post-upgrade checks for one component and wait for them to
    finish, then return the aggregate results."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(timeout=dict(type='int', required=False),
                         component_type=dict(required=True,
                                             choices=['mp', 'host', 'edge']))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    timeout = module.params['timeout']
    component_type = module.params['component_type']
    headers = dict(Accept="application/json")
    headers['Content-Type'] = 'application/json'
    # Checks must be triggered on the upgrade orchestrator node.
    mgr_hostname = get_upgrade_orchestrator_node(module, mgr_hostname,
                                                 mgr_username, mgr_password,
                                                 headers, validate_certs)
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    # Runs post upgrade checks
    if module.check_mode:
        module.exit_json(changed=False,
                         debug_out='Post upgrade checks will be executed.',
                         id='Post upgrade checks')
    try:
        (rc, resp) = request(manager_url +
                             '/upgrade/%s?action=execute_post_upgrade_'
                             'checks' % component_type.upper(), data='',
                             headers=headers, method='POST',
                             url_username=mgr_username,
                             url_password=mgr_password,
                             validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        module.fail_json(msg="Failed to execute post upgrade checks. Error[%s]."
                         % to_native(err))
    # Poll for completion; fall back to the helper's default timeout when
    # the user did not supply one.
    try:
        if timeout is None:
            wait_for_post_upgrade_checks_to_execute(
                module, manager_url,
                '/upgrade/upgrade-unit-groups/aggregate-info',
                mgr_username, mgr_password, validate_certs, component_type)
        else:
            wait_for_post_upgrade_checks_to_execute(
                module, manager_url,
                '/upgrade/upgrade-unit-groups/aggregate-info',
                mgr_username, mgr_password, validate_certs, component_type,
                timeout)
    except Exception as err:
        module.fail_json(msg='Error while polling for execution of post upgrade'
                         ' checks. Error [%s]' % to_native(err))
    time.sleep(5)
    changed = True
    try:
        (rc, resp) = request(manager_url +
                             '/upgrade/upgrade-unit-groups/aggregate-info',
                             url_username=mgr_username,
                             url_password=mgr_password,
                             validate_certs=validate_certs)
    except Exception as err:
        module.fail_json(msg='Post upgrade checks were executed successfully but error'
                         ' occured while retrieving the results. Error [%s]'
                         % (to_native(err)))
    module.exit_json(changed=changed,
                     message='Post upgrade checks are performed successfully:\n'
                             '----------------------------\n' + str(resp))
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_pre_post_checks_facts short_description: 'Get the pre and post upgrade checks' description: 'Get the pre and post upgrade checks' version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str ''' EXAMPLES = ''' - name: Get the pre upgrade and post upgrade checks nsxt_upgrade_pre_post_checks_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/upgrade/upgrade-checks-info', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error while retrieving pre and post upgrade checks. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_upgrade_prechecks.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_prechecks short_description: 'Execute pre-upgrade checks' description: "Run pre-defined checks to identify potential issues which can be encountered during an upgrade or can cause an upgrade to fail. The results of the checks are added to the respective upgrade units aggregate-info. The progress and status of operation is part of upgrade status summary of individual components." version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str timeout: description: 'Timeout while polling for prechecks to complete' required: false type: int state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to run pre upgrade checks. 'absent' is used to abort preupgrade checks." 
def wait_for_pre_upgrade_checks_to_execute(module, manager_url, endpoint,
                                           mgr_username, mgr_password,
                                           validate_certs, time_out=10800):
    '''
    Poll the given status endpoint until every component reports its
    pre-upgrade checks as COMPLETED.

    params:
    - module: AnsibleModule handle; used to exit early when checks are aborted.
    - endpoint: API endpoint polled for the component status summary.
    - time_out: maximum number of seconds to poll before raising.

    Exits the module when the checks were aborted, returns None once all
    components completed, raises Exception on timeout.
    '''
    operation_time = 0
    while True:
        # Reset resp each iteration: previously a failed request left resp
        # unbound (NameError on the first poll) or stale from the last poll.
        resp = None
        try:
            (rc, resp) = request(manager_url + endpoint,
                                 headers=dict(Accept='application/json'),
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
        except Exception:
            # Transient API failures are tolerated; keep polling until timeout.
            pass
        if resp is not None and 'component_status' in resp:
            all_completed = True
            for component_status in resp['component_status']:
                status = component_status['pre_upgrade_status']['status']
                if status == 'ABORTED':
                    module.exit_json(changed=False,
                                     message='Pre upgrade checks started to run,'
                                     ' but aborted before they could finish.')
                if status != 'COMPLETED':
                    all_completed = False
            if all_completed:
                return None
        time.sleep(15)
        operation_time = operation_time + 15
        if operation_time > time_out:
            raise Exception('Operation timed out.')


def main():
    """Ansible entry point: run (state=present) or abort (state=absent) the
    NSX-T pre upgrade checks and report the failures found, if any."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(timeout=dict(type='int', required=False),
                         state=dict(required=True, choices=['present', 'absent']))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    timeout = module.params['timeout']
    headers = dict(Accept="application/json")
    headers['Content-Type'] = 'application/json'
    # Pre upgrade checks must be driven from the upgrade orchestrator node.
    mgr_hostname = get_upgrade_orchestrator_node(module, mgr_hostname,
                                                 mgr_username, mgr_password,
                                                 headers, validate_certs)
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    if state == 'present':
        # Runs pre upgrade checks
        if module.check_mode:
            module.exit_json(changed=False,
                             debug_out='Pre upgrade checks will be executed.',
                             id='Pre upgrade checks')
        # The execute action takes no request body; the module params are only
        # connection settings, so nothing is serialized into the POST.
        try:
            (rc, resp) = request(manager_url +
                                 '/upgrade?action=execute_pre_upgrade_checks',
                                 data='', headers=headers, method='POST',
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
        except Exception as err:
            module.fail_json(msg="Failed to execute pre upgrade checks. Error[%s]."
                             % to_native(err))
        try:
            if timeout is None:
                wait_for_pre_upgrade_checks_to_execute(module, manager_url,
                                                       '/upgrade/status-summary',
                                                       mgr_username, mgr_password,
                                                       validate_certs)
            else:
                wait_for_pre_upgrade_checks_to_execute(module, manager_url,
                                                       '/upgrade/status-summary',
                                                       mgr_username, mgr_password,
                                                       validate_certs, timeout)
        except Exception as err:
            module.fail_json(msg='Error while polling for execution of pre upgrade'
                             ' checks. Error [%s]' % to_native(err))
        time.sleep(5)
        changed = False
        try:
            (rc, resp) = request(manager_url + '/upgrade/pre-upgrade-checks/failures',
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs)
        except Exception as err:
            module.fail_json(msg='Pre upgrade checks were executed successfully but error'
                             ' occured while retrieving the results. Error [%s]'
                             % (to_native(err)))
        # Fail the module when any individual pre upgrade check reported FAILURE.
        prechecks_failure = any(result.get('type') == 'FAILURE'
                                for result in resp.get('results', []))
        if prechecks_failure:
            module.fail_json(msg='Pre upgrade checks are performed successfully. Found errors. '
                             'Thus, you cannot proceed. To get full report run upgrade groups '
                             'facts module. Precheck results: %s' % str(resp))
        module.exit_json(changed=changed,
                         message='Pre upgrade checks are performed successfully:'
                         ' Failures are listed. To get full report run upgrade groups '
                         'facts module.' + str(resp))
    elif state == 'absent':
        # Aborts pre upgrade checks
        try:
            (rc, resp) = request(manager_url + '/upgrade?action=abort_pre_upgrade_checks',
                                 data='', headers=headers, method='POST',
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
        except Exception as err:
            module.fail_json(msg="Failed to abort running pre upgrade checks. Error[%s]."
                             % to_native(err))
        time.sleep(5)
        module.exit_json(changed=True, message="Upgrade prechecks are aborted.")
def get_upgrade_status(module, manager_url, mgr_username, mgr_password, validate_certs):
    '''
    Get the current status of upgrade at the start.
    Doesn't upgrade if any component is in progress
    or system is already upgraded.

    Returns the overall upgrade status string when the upgrade can be
    started/continued; exits the module when the system is already upgraded;
    fails the module if an upgrade stays in progress across three polls.
    '''
    no_of_checks = 0
    while True:
        endpoint = "/upgrade/upgrade-unit-groups?sync=true"
        # Force a server-side sync of upgrade unit groups before reading status.
        call_get_sync(manager_url, endpoint, mgr_username, mgr_password,
                      validate_certs)
        upgrade_status = get_attribute_from_endpoint(module, manager_url,
                                                     '/upgrade/status-summary',
                                                     mgr_username, mgr_password,
                                                     validate_certs,
                                                     'overall_upgrade_status',
                                                     False)
        no_of_checks = no_of_checks + 1
        if upgrade_status == 'IN_PROGRESS' or upgrade_status == 'PAUSING':
            # Allow a couple of re-checks (20s apart) before giving up on an
            # upgrade that is still moving.
            if no_of_checks > 2:
                module.fail_json(msg='Upgrade is in state: %s, can\'t continue'
                                 % upgrade_status)
        elif upgrade_status == 'SUCCESS':
            module.exit_json(changed=False,
                             message='Upgrade state is SUCCESS. No need to'
                             ' continue.')
        else:
            return upgrade_status
        time.sleep(20)

def call_get_sync(managerUrl, endpoint, mgrUsername, mgrPassword, validateCerts):
    # Fire-and-forget GET used only to trigger the sync; response is ignored.
    request(managerUrl + endpoint, method='GET',
            url_username=mgrUsername, url_password=mgrPassword,
            validate_certs=validateCerts, ignore_errors=True)

def decide_next_step(module, manager_url, mgr_username, mgr_password,
                     validate_certs, can_continue, is_failed):
    '''
    params:
    - can_continue: if upgrade can be continued
    - is_failed: Is there any component Failure
    return:
    - Decides the next operation to be done based on
      can_continue and is_failed values. May exit or fail the module,
      raise on a hard failure, or return to let the caller keep polling.
    '''
    if can_continue and is_failed:
        # Caller keeps driving the upgrade despite a (tolerated) failure flag.
        return
    elif can_continue and not is_failed:
        return
    elif not can_continue and is_failed:
        raise Exception('Upgrade failed. Please run upgrade status summary'
                        ' to see the reason of upgrade failure.')
    else:
        # Not continuable but not failed: either still in progress or done.
        time.sleep(15)
        try:
            upgrade_status = get_attribute_from_endpoint(module, manager_url,
                                                         '/upgrade/summary',
                                                         mgr_username,
                                                         mgr_password,
                                                         validate_certs,
                                                         'upgrade_status', False)
        except Exception as err:
            # Summary unavailable right now; let the caller poll again.
            return
        if upgrade_status == 'SUCCESS':
            module.exit_json(changed=True,
                             message='System has been upgraded successfully!!!')
        elif upgrade_status == 'IN_PROGRESS' or upgrade_status == 'PAUSING' or \
             upgrade_status == 'PAUSED':
            return
        else:
            module.fail_json(msg='All components till last one are upgraded. Still upgrade status'
                             ' is %s. Please run upgrade status summary to see the reason.'
                             % upgrade_status)

def check_continuity(module, manager_url, mgr_username, mgr_password, validate_certs):
    '''
    Returns:
    Based on the output of upgrade status summary API, gets the
    checks and returns if upgrade can be continued and if there
    is any component fail in the upgrade.

    Return value is a (can_continue, is_failed) tuple. On any API/parsing
    error it returns (True, True) so the caller retries rather than aborting.
    '''
    try:
        component_status_list = get_attribute_from_endpoint(module, manager_url,
                                                            '/upgrade/status-summary',
                                                            mgr_username,
                                                            mgr_password,
                                                            validate_certs,
                                                            'component_status',
                                                            False)
    except Exception as err:
        can_continue = True
        is_failed = True
        return can_continue, is_failed
    try:
        # First pass: any component actively moving blocks continuation.
        can_continue = True
        for component_status in component_status_list:
            if component_status['status'] == 'IN_PROGRESS' or \
               component_status['status'] == 'PAUSING':
                can_continue = False
                break
        if not can_continue:
            return can_continue, False
        else:
            # Second pass, scanning components in reverse order: the first
            # status other than NOT_STARTED decides the outcome.
            is_failed = False
            found_not_started = False
            for component_status in component_status_list[::-1]:
                if component_status['status'] == 'NOT_STARTED':
                    found_not_started = True
                elif component_status['status'] == 'PAUSED':
                    can_continue = True
                    is_failed = False
                    return can_continue, is_failed
                elif component_status['status'] == 'SUCCESS':
                    # SUCCESS with no trailing NOT_STARTED means everything
                    # after this component already ran: nothing left to do.
                    if not found_not_started:
                        can_continue = False
                        is_failed = False
                        return can_continue, is_failed
                    else:
                        can_continue = True
                        is_failed = False
                        return can_continue, is_failed
                elif component_status['status'] == 'FAILED':
                    can_continue = False
                    is_failed = True
                    return can_continue, is_failed
                elif component_status['status'] == 'IN_PROGRESS' or \
                     component_status['status'] == 'PAUSING':
                    can_continue = False
                    is_failed = False
                    return can_continue, is_failed
                else:
                    # Unknown status value; treated like an error (retry).
                    return True, True
    except Exception as err:
        can_continue = True
        is_failed = True
        return can_continue, is_failed

def fetch_target_version(module, manager_url, mgr_username, mgr_password,
                         validate_certs):
    # Returns the upgrade target version string from /upgrade/summary, or
    # None when the summary cannot be retrieved.
    try:
        target_version = get_attribute_from_endpoint(module, manager_url,
                                                     '/upgrade/summary',
                                                     mgr_username, mgr_password,
                                                     validate_certs,
                                                     'target_version', False)
    except Exception as err:
        return
    return target_version
def _request_upgrade_action(module, manager_url, headers, mgr_username,
                            mgr_password, validate_certs, upgrade_status):
    # Start a fresh upgrade, or continue a partially finished one.
    action = 'start' if upgrade_status == 'NOT_STARTED' else 'continue'
    try:
        (rc, resp) = request(manager_url + '/upgrade/plan?action=%s' % action,
                             data='', headers=headers, method='POST',
                             url_username=mgr_username,
                             url_password=mgr_password,
                             validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        module.fail_json(msg="Failed while upgrading. Error[%s]."
                         % to_native(err))


def _poll_component_upgrade(module, manager_url, mgr_username, mgr_password,
                            validate_certs):
    # Poll until the component currently upgrading finishes; decide_next_step
    # may exit/fail the module itself (success or hard failure).
    while True:
        try:
            can_continue, is_failed = check_continuity(module, manager_url,
                                                       mgr_username,
                                                       mgr_password,
                                                       validate_certs)
            decide_next_step(module, manager_url, mgr_username, mgr_password,
                             validate_certs, can_continue, is_failed)
            if can_continue and not is_failed:
                break
            time.sleep(10)
        except Exception as err:
            module.fail_json(msg='Upgrade failed. Error: [%s]' % to_native(err))


def main():
    """Ansible entry point: run the NSX-T upgrade as per the upgrade plan.

    With paused_upgrade=False (auto mode) the module keeps starting or
    continuing the plan until decide_next_step() terminates it. With
    paused_upgrade=True (manual mode) it upgrades a single component and
    returns, so the playbook is re-run once per component.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(paused_upgrade=dict(type='bool', required=True))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    paused_upgrade = module.params['paused_upgrade']
    headers = dict(Accept="application/json")
    headers['Content-Type'] = 'application/json'
    # The upgrade must be driven from the upgrade orchestrator node.
    mgr_hostname = get_upgrade_orchestrator_node(module, mgr_hostname,
                                                 mgr_username, mgr_password,
                                                 headers, validate_certs)
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    target_version = fetch_target_version(module, manager_url, mgr_username,
                                          mgr_password, validate_certs)
    # fetch_target_version() returns None when /upgrade/summary is not
    # reachable; previously that crashed here with a TypeError. Also parse
    # the full major component instead of the first character so a future
    # two-digit major (e.g. "10.0") is handled correctly.
    if target_version is not None and int(target_version.split('.')[0]) >= 9:
        trigger_upgrade_reverse_order(module, mgr_hostname, mgr_username,
                                      mgr_password, validate_certs)
    if module.check_mode:
        if paused_upgrade:
            module.exit_json(changed=False,
                             debug_out='NSX-T will upgrade with pauses.')
        else:
            module.exit_json(changed=False,
                             debug_out='NSX-T will upgrade without pauses.')
    # If paused_upgrade is not true i.e auto mode
    if not paused_upgrade:
        while True:
            upgrade_status = get_upgrade_status(module, manager_url,
                                                mgr_username, mgr_password,
                                                validate_certs)
            _request_upgrade_action(module, manager_url, headers, mgr_username,
                                    mgr_password, validate_certs,
                                    upgrade_status)
            time.sleep(10)
            _poll_component_upgrade(module, manager_url, mgr_username,
                                    mgr_password, validate_certs)
    else:
        # Paused upgrade i.e manual mode: one component per module run.
        upgrade_status = get_upgrade_status(module, manager_url, mgr_username,
                                            mgr_password, validate_certs)
        _request_upgrade_action(module, manager_url, headers, mgr_username,
                                mgr_password, validate_certs, upgrade_status)
        time.sleep(10)
        _poll_component_upgrade(module, manager_url, mgr_username,
                                mgr_password, validate_certs)
        module.exit_json(changed=True,
                         message='A component has been upgraded successfully.'
                         ' Whole system is not. Please run the module'
                         ' again till the time whole system is'
                         ' not upgraded.')
required: true type: str ''' EXAMPLES = ''' - name: Get upgrade status summary nsxt_upgrade_status_summary_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url + '/upgrade/status-summary', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error while retrieving upgrade status summary ' 'information. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_upgrade_uc.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_uc short_description: 'Upgrade the upgrade coordinator' description: "Upgrade the upgrade coordinator" version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
def main():
    """Ansible entry point: upgrade the NSX-T upgrade coordinator and wait
    for the upgrade to reach SUCCESS."""
    module = AnsibleModule(argument_spec=vmware_argument_spec(),
                           supports_check_mode=True)
    nsx_host = module.params['hostname']
    username = module.params['username']
    password = module.params['password']
    verify_certs = module.params['validate_certs']
    headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}

    # The UC upgrade must be driven from the upgrade orchestrator node.
    nsx_host = get_upgrade_orchestrator_node(module, nsx_host, username,
                                             password, headers, verify_certs)
    manager_url = 'https://{}/api/v1'.format(nsx_host)

    if module.check_mode:
        module.exit_json(changed=False, debug_out='Upgrade Coordinator '
                         'will be upgraded.', id=nsx_host)

    # Trigger the UC upgrade.
    try:
        (status_code, response) = request(manager_url + '/upgrade?action=upgrade_uc',
                                          data='', headers=headers, method='POST',
                                          url_username=username,
                                          url_password=password,
                                          validate_certs=verify_certs,
                                          ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Failed to upgrade UC. Error[%s].'
                         % to_native(err))

    time.sleep(5)
    # Block until the UC upgrade status reaches SUCCESS (or fail on FAILED).
    try:
        wait_for_operation_to_execute(manager_url,
                                      '/upgrade/uc-upgrade-status',
                                      username, password, verify_certs,
                                      ['state'], ['SUCCESS'], ['FAILED'])
    except Exception as err:
        module.fail_json(msg='Error while upgrading UC. Error [%s]'
                         % to_native(err))
    module.exit_json(changed=True, result=response, message='UC is upgraded'
                     ' successfully.')
required: true type: str ''' EXAMPLES = ''' - name: Gets UC upgrade status nsxt_upgrade_uc_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/upgrade/uc-upgrade-status', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing UC upgrade status. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_upgrade_upload_mub.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upgrade_upload_mub short_description: 'Uploads upgrade mub' description: "Uploads upgrade mub" version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str file: description: 'The path of the mub file' required: false type: str url: description: 'URL of MUB file' required: false type: str ''' EXAMPLES = ''' - name: Upload MUB upload_mub: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False url: "https://file-server.com/file.mub" ''' RETURN = '''# ''' import atexit import mmap import os import json import time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.common_utils import wait_for_operation_to_execute from ansible.module_utils._text import to_native def get_upload_mub_params(args=None): args_to_remove = ['username', 'password', 'port', 'hostname', 'validate_certs', 'timeout'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value is None: args.pop(key, None) return args def get_mgr_ip_upgrade_enabled(module, mgr_url, mgr_username, mgr_password, headers, validate_certs): try: (rc, resp) = request(mgr_url + '/node/services/install-upgrade', headers=headers, url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(changed=True, msg='Error getting ip address where ' 'upgrade is enabled. Error: {}'.format(err)) return resp['service_properties']['enabled_on']; def wait_till_upload_done(module, bundle_id, mgr_url, mgr_username, mgr_password, headers, validate_certs): try: while True: (rc, resp) = request(mgr_url + '/upgrade/bundles/%s/upload-status'% bundle_id, headers=headers, url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if resp['status'] == 'FAILED': module.fail_json(msg='Failed to upload upgrade bunlde. 
Error: %s' % resp['detailed_status']) if resp['status'] == 'SUCCESS': time.sleep(5) return except Exception as err: module.fail_json(changed=True, msg="Error: %s" % err) def upload_mub(module, mgr_url, mgr_username, mgr_password, validate_certs, request_data, headers, ip_address, timeout=10800): endpoint = '/upgrade/bundles' mub_type = 'url' #headers = {} if module.params['file'] is not None: mub_type = 'file' endpoint = endpoint +'?action=upload' if mub_type == 'file': file_path = module.params['file'] try: file_data = open(file_path, 'rb') atexit.register(file_data.close) except Exception as e: module.fail_json(msg='failed to open mub file %s Error: %s' % (file_path, to_native(e))) if os.stat(file_path).st_size == 0: request_data = '' else: request_data = mmap.mmap(file_data.fileno(), 0, access=mmap.ACCESS_READ) atexit.register(request_data.close) from urllib3 import encode_multipart_formdata from urllib3.fields import RequestField with open(file_path, 'rb') as src_file: rf = RequestField('file', src_file.read(), os.path.basename(src_file.name)) rf.make_multipart() body, content_type = encode_multipart_formdata([rf]) headers['Content-Type'] = content_type headers['Content-length'] = len(body) if mub_type == 'url': body = request_data try: (rc, resp) = request(mgr_url + endpoint, data=body, headers=headers, method='POST', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if rc == 200: bundle_id = 'latest'#resp['bundle_id'] headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' try: wait_for_operation_to_execute(mgr_url, '/upgrade/bundles/%s/upload-status'% bundle_id, mgr_username, mgr_password, validate_certs, ['status'], ['SUCCESS'], ['FAILED']) except Exception as err: module.fail_json(msg='Error while uploading upgrade bundle. 
Error [%s]' % to_native(err)) module.exit_json(changed=True, ip_address=ip_address, response=resp, message='The upgrade bundle %s got uploaded successfully.' % module.params[mub_type]) else: module.fail_json(msg='Failed to run upload mub. response code: {}' ' response: {}'.format(rc, resp)) except Exception as err: module.fail_json(changed=True, msg="Error: {}".format(err)) def main(): argument_spec = vmware_argument_spec() argument_spec.update(url=dict(type='str'), file=dict(type='str'), timeout=dict(type='int', required=False)) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=[('url', 'file')]) upgrade_params = get_upload_mub_params(module.params.copy()) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] timeout = module.params['timeout'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' request_data = json.dumps(upgrade_params) node_ip_address = get_mgr_ip_upgrade_enabled(module, manager_url, mgr_username, mgr_password, headers, validate_certs) update_node_url = 'https://{}/api/v1'.format(node_ip_address) if timeout is not None: upload_mub(module, update_node_url, mgr_username, mgr_password, validate_certs, request_data, headers, node_ip_address, timeout) else: upload_mub(module, update_node_url, mgr_username, mgr_password, validate_certs, request_data, headers, node_ip_address) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_upgrade_upload_mub_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_upload_upgrade_bundle_facts short_description: 'Get uploaded upgrade bundle information.' description: "Get uploaded upgrade bundle information." version_added: '2.7' author: 'Kommireddy Akhilesh' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' 
required: true type: str bundle_id: description: 'Uploaded bundle ID' required: true type: str ''' EXAMPLES = ''' - name: Get uploaded upgrade bundle information nsxt_upload_upgrade_bundle_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False bundle_id: "2500014166034" ''' RETURN = '''# ''' from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() argument_spec.update(bundle_id=dict(required=True, type='str')) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] if module.params['bundle_id'] is None: module.fail_json(msg='Error: parameter bundle ID not provided') else: bundle_id = module.params['bundle_id'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/upgrade/bundles/%s/upload-status' % bundle_id, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error while retrieving bundle information. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_uplink_profiles.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_uplink_profiles short_description: Create a Hostswitch Profile description: Creates a hostswitch profile. The resource_type is required. For uplink profiles, the teaming and policy parameters are required. By default, the mtu is 1600 and the transport_vlan is 0. The supported MTU range is 1280 through 9000. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str display_name: description: Display name required: true type: str description: description: Description of the resource required: false type: str enabled: description: 'The enabled property specifies the status of NIOC feature. 
When enabled is set to true, NIOC feature is turned on and the bandwidth allocations specified for the traffic resources are enforced. When enabled is set to false, NIOC feature is turned off and no bandwidth allocation is guaranteed. By default, enabled will be set to true.' required: false type: boolean extra_configs: description: list of extra configs required: false type: array of ExtraConfig host_infra_traffic_res: description: 'host_infra_traffic_res specifies bandwidth allocation for various traffic resources.' required: false type: array of ResourceAllocation lags: description: list of LACP group required: false type: array of Lag mtu: description: Maximum Transmission Unit used for uplinks required: false type: int named_teamings: description: List of named uplink teaming policies that can be used by logical switches required: false type: array of NamedTeamingPolicy overlay_encap: description: The protocol used to encapsulate overlay traffic required: false type: str required_capabilities: description: None required: false type: list resource_type: choices: - UplinkHostSwitchProfile description: Supported HostSwitch profiles. required: true type: str send_enabled: description: Enabled or disabled sending LLDP packets required: false type: boolean tags: description: Opaque identifier meaninful to API user required: false type: Array of Tag state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 'absent' is used to delete resource." 
required: true teaming: active_list: description: List of Uplinks used in active list required: true type: array of Uplink description: Default TeamingPolicy associated with this UplinkProfile policy: description: Teaming policy required: true type: str required: true standby_list: description: List of Uplinks used in standby list required: false type: array of Uplink type: dict transport_vlan: description: VLAN used for tagging Overlay traffic of associated HostSwitch required: false type: int ''' EXAMPLES = ''' - name: Create a Hostswitch Profile nsxt_uplink_profiles: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False display_name: "uplinkProfile1", mtu: 1600, resource_type: "UplinkHostSwitchProfile", teaming: active_list: - uplink_name: "uplink-1" uplink_type: PNIC policy: FAILOVER_ORDER transport_vlan: 0, state: "present", ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def get_profile_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_host_switch_profiles(module, manager_url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(manager_url+ '/host-switch-profiles', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing host profiles. 
Error [%s]' % (to_native(err))) return resp def get_uplink_profile_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name): host_switch_profiles = get_host_switch_profiles(module, manager_url, mgr_username, mgr_password, validate_certs) for host_switch_profile in host_switch_profiles['results']: if host_switch_profile.__contains__('display_name') and host_switch_profile['display_name'] == display_name: return host_switch_profile return None def id_exist_in_list_dict_obj(key, list_obj1, list_obj2): all_id_presents = False if len(list_obj1) != len(list_obj2): return all_id_presents for dict_obj1 in list_obj1: if dict_obj1.__contains__(key): for dict_obj2 in list_obj2: if dict_obj2.__contains__(key) and dict_obj1[key] == dict_obj2[key]: all_id_presents = True continue if not all_id_presents: return False return True def cmp_dict(dict1, dict2): # dict1 contain dict2 #print dict2 for k2, v2 in dict2.items(): found = False if k2 not in dict1: continue if type(v2) != list and dict1[k2] != dict2[k2]: return False for obj2 in v2: for obj1 in dict1[k2]: if all(item in obj1.items() for item in obj2.items()): found = True if not found: return False return True def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, profile_params): existing_profile = get_uplink_profile_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, profile_params['display_name']) if existing_profile is None: return False if existing_profile.__contains__('mtu') and profile_params.__contains__('mtu') and \ existing_profile['mtu'] != profile_params['mtu']: return True if existing_profile.__contains__('transport_vlan') and profile_params.__contains__('transport_vlan') and \ existing_profile['transport_vlan'] != profile_params['transport_vlan']: return True if existing_profile.__contains__('lags') and not profile_params.__contains__('lags'): return True if not existing_profile.__contains__('lags') and 
profile_params.__contains__('lags'): return True if profile_params.__contains__('lags') and profile_params['lags']: existing_lags = existing_profile['lags'] new_lags = profile_params['lags'] sorted_existing_lags = sorted(existing_lags, key = lambda i: i['name']) sorted_new_lags = sorted(new_lags, key = lambda i: i['name']) if len(sorted_existing_lags) != len(sorted_new_lags): return True both_lags_same = True for i in range(len(sorted_existing_lags)): diff_obj = {k: sorted_existing_lags[i][k] for k in sorted_existing_lags[i] if k in sorted_new_lags[i] and sorted_existing_lags[i][k] != sorted_new_lags[i][k]} del diff_obj['uplinks'] if not cmp_dict(diff_obj, sorted_new_lags[i]): both_lags_same = False if not both_lags_same: return True if profile_params.__contains__('named_teamings'): existing_teamings = existing_profile['named_teamings'] new_teamings = profile_params['named_teamings'] sorted_existing_teamings = sorted(existing_teamings, key = lambda i: i['name']) sorted_new_teamings = sorted(new_teamings, key = lambda i: i['name']) if len(sorted_existing_teamings) != len(sorted_new_teamings): return False both_teamings_same = True for i in range(len(sorted_existing_teamings)): diff_obj = {k: sorted_existing_teamings[i][k] for k in sorted_existing_teamings[i] if k in sorted_new_teamings[i] and sorted_existing_teamings[i][k] != sorted_new_teamings[i][k]} if not cmp_dict(diff_obj, sorted_new_teamings[i]): both_teamings_same = False if not both_teamings_same: return True return False def main(): argument_spec = vmware_argument_spec() argument_spec.update(display_name=dict(required=True, type='str'), transport_vlan=dict(required=False, type='int'), description=dict(required=False, type='str'), enabled=dict(required=False, type='boolean'), host_infra_traffic_res=dict(required=False, type='list'), overlay_encap=dict(required=False, type='str'), named_teamings=dict(required=False, type='list'), mtu=dict(required=False, type='int'), required_capabilities=dict(required=False, 
type='list'), send_enabled=dict(required=False, type='boolean'), extra_configs=dict(required=False, type='list'), teaming=dict(required=True, type='dict', policy=dict(required=True, type='str'), standby_list=dict(required=False, type='list'), active_list=dict(required=True, type='list')), lags=dict(required=False, type='list'), tags=dict(required=False, type='list'), resource_type=dict(required=True, type='str', choices=['UplinkHostSwitchProfile']), state=dict(required=True, choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) profile_params = get_profile_params(module.params.copy()) state = module.params['state'] mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] display_name = module.params['display_name'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) host_switch_profile_dict = get_uplink_profile_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name) host_switch_profile_id, revision = None, None if host_switch_profile_dict: host_switch_profile_id = host_switch_profile_dict['id'] revision = host_switch_profile_dict['_revision'] if state == 'present': headers = dict(Accept="application/json") headers['Content-Type'] = 'application/json' updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, profile_params) if not updated: # add the block if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(profile_params)), id='12345') request_data = json.dumps(profile_params) try: if host_switch_profile_id: module.exit_json(changed=False, id=host_switch_profile_id, message="Uplink profile with display_name %s already exist."% module.params['display_name']) (rc, resp) = request(manager_url+ '/host-switch-profiles', data=request_data, headers=headers, method='POST', url_username=mgr_username, 
url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to add host profile. Request body [%s]. Error[%s]." % (request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="host profile with display name %s created." % module.params['display_name']) else: if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(profile_params)), id=host_switch_profile_id) profile_params['_revision'] = revision # update current revision request_data = json.dumps(profile_params) id = host_switch_profile_id try: (rc, resp) = request(manager_url+ '/host-switch-profiles/%s' % id, data=request_data, headers=headers, method='PUT', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg="Failed to update host profile with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err))) time.sleep(5) module.exit_json(changed=True, id=resp["id"], body= str(resp), message="host profile with id %s updated." % id) elif state == 'absent': # delete the array id = host_switch_profile_id if id is None: module.exit_json(changed=False, msg='No host switch profile exist with display name %s' % display_name) if module.check_mode: module.exit_json(changed=True, debug_out=str(json.dumps(profile_params)), id=id) try: (rc, resp) = request(manager_url + "/host-switch-profiles/%s" % id, method='DELETE', url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs) except Exception as err: module.fail_json(msg="Failed to delete host profile with id %s. Error[%s]." % (id, to_native(err))) time.sleep(5) module.exit_json(changed=True, object_name=id, message="host profile with id %s deleted." 
% id) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_uplink_profiles_facts.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_uplink_profiles_facts short_description: List Hostswitch Profiles description: Returns information about the configured hostswitch profiles. Hostswitch profiles define networking policies for hostswitches (sometimes referred to as bridges in OVS). Currently, only uplink teaming is supported. Uplink teaming allows NSX to load balance traffic across different physical NICs (PNICs) on the hypervisor hosts. Multiple teaming policies are supported, including LACP active, LACP passive, load balancing based on source ID, and failover order. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. 
required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. required: true type: str ''' EXAMPLES = ''' - name: List Hostswitch Profiles nsxt_uplink_profiles_facts: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False ''' RETURN = '''# ''' import json from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) mgr_hostname = module.params['hostname'] mgr_username = module.params['username'] mgr_password = module.params['password'] validate_certs = module.params['validate_certs'] manager_url = 'https://{}/api/v1'.format(mgr_hostname) changed = False try: (rc, resp) = request(manager_url+ '/host-switch-profiles', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing host switch profiles. Error [%s]' % (to_native(err))) module.exit_json(changed=changed, **resp) if __name__ == '__main__': main() ================================================ FILE: plugins/modules/nsxt_vidm.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2021 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_vidm short_description: 'Register a vIDM with NSX' description: "Register a vIDM with NSX" version_added: '3.2' author: 'Kaushik Lele' options: hostname: description: 'Deployed NSX manager hostname.' required: true type: str username: description: 'The username to authenticate with the NSX manager.' required: true type: str password: description: 'The password to authenticate with the NSX manager.' required: true type: str client_id: description: 'vIDM client id' required: true type: str client_secret: description: 'vIDM client secret' required: false type: str host_name: description: 'Fully Qualified Domain Name(FQDN) of vIDM' required: false type: str lb_enable: description: 'Load Balancer enable flag' required: false type: bool node_host_name: description: "Host name of the node redirected to host name to use when creating the redirect URL for clients to follow after authenticating to vIDM" required: true type: bool thumbprint: description: "vIDM certificate thumbprint Hexadecimal SHA256 hash of the vIDM server's X.509 certificate" required: true type: str state: choices: - present - absent description: "State can be either 'present' or 'absent'. 'present' is used to create or update resource. 
'absent' is used to delete resource." required: true ''' EXAMPLES = ''' - name: Register vIDM with NSX nsxt_vidm: hostname: "10.192.167.137" username: "admin" password: "Admin!23Admin" validate_certs: False client_id: "OAuth2Client_NsxClientId", client_secret: "23424234234234" host_name: "lbhost_vidm.eng.vmware.com", lb_enable: False node_host_name: "jt-vidm.eng.vmware.com" thumbprint: "898b75618e3e56615d53f987a720ff22b6381f4b85bec1eb973214ff7361f8b8" state: present ''' RETURN = '''# ''' import json, time from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.vmware_nsxt import vmware_argument_spec, request from ansible.module_utils._text import to_native import ssl import socket import hashlib def get_vidm_params(args=None): args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs'] for key in args_to_remove: args.pop(key, None) for key, value in args.copy().items(): if value == None: args.pop(key, None) return args def get_vidm(module, url, mgr_username, mgr_password, validate_certs): try: (rc, resp) = request(url, headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) except Exception as err: module.fail_json(msg='Error accessing vidm details. 
Error [%s]' % (to_native(err))) return resp def get_vidm_from_client_id(module, url, mgr_username, mgr_password, validate_certs, client_id): vidm = get_vidm(module, url, mgr_username, mgr_password, validate_certs) if vidm['client_id'] == client_id: return vidm return None def wait_till_create(module, url, mgr_username, mgr_password, validate_certs): retry_counter = 0 try: while True: try: (rc, resp) = request(url + '/status', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if resp['runtime_state'] != "ALL_OK": if retry_counter < 6: time.sleep(5) retry_counter = retry_counter + 1 else: module.fail_json( msg='Failed to register vIDM. runtime state is : %s' % (str(resp["runtime_state"]))) else: break; except Exception as err: # When registration is in progress and status is not yet accessible then it can throw error. # {'error_code': 36514, 'error_message': 'Error when requesting to verify VMware Identity Manager user access client', # So retry is needed in error case as well. if retry_counter < 6: retry_counter = retry_counter + 1 time.sleep(5) else: module.fail_json(msg='Failed to register vIDM. runtime state is : %s' % (to_native(err))) except Exception as err: module.fail_json(msg='Error accessing vIDM status. Error [%s]' % (to_native(err))) return def wait_till_delete(module, url, mgr_username, mgr_password, validate_certs): retry_counter = 0 try: while True: (rc, resp) = request(url + '/status', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True) if resp['runtime_state'] != "NOT_OK" or resp['vidm_enable'] is not False: if retry_counter < 6: time.sleep(10) retry_counter = retry_counter + 1 else: module.fail_json(msg='Failed to unregister vIDM. 
def check_for_update(existing_vidm, vidm_params):
    """Return True when the registered vIDM configuration differs from the
    requested one.

    Bug fix: the original compared ``client_id`` twice; each relevant
    property is now diffed exactly once. ``client_secret`` is intentionally
    not compared — the API never returns it, so it cannot be diffed.
    """
    compared_keys = ('client_id', 'host_name', 'lb_enable',
                     'node_host_name', 'thumbprint', 'vidm_enable')
    return any(existing_vidm[key] != vidm_params[key]
               for key in compared_keys)


def main():
    """Register (state=present) or un-register (state=absent) vIDM on the
    NSX manager node via PUT /node/aaa/providers/vidm."""
    argument_spec = vmware_argument_spec()
    # no_log=True keeps the client secret out of Ansible logs/output.
    argument_spec.update(client_id=dict(required=True, type='str'),
                         client_secret=dict(required=False, type='str',
                                            no_log=True),
                         host_name=dict(required=True, type='str'),
                         lb_enable=dict(required=False, type='bool'),
                         node_host_name=dict(required=True, type='str'),
                         thumbprint=dict(required=True, type='str'),
                         state=dict(required=True,
                                    choices=['present', 'absent']))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    vidm_params = get_vidm_params(module.params.copy())
    state = module.params['state']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    vidm_api_url = manager_url + '/node/aaa/providers/vidm'
    existing_vidm = get_vidm_from_client_id(module, vidm_api_url,
                                            mgr_username, mgr_password,
                                            validate_certs,
                                            vidm_params['client_id'])
    headers = {'Accept': 'application/json',
               'Content-Type': 'application/json'}

    if state == 'present':
        vidm_params["vidm_enable"] = True
        if existing_vidm is not None and \
                not check_for_update(existing_vidm, vidm_params):
            module.exit_json(changed=False, id=vidm_params['client_id'],
                             message="vIDM with id %s is already enabled."
                                     % vidm_params['client_id'])
        # vIDM not present, or present but changed: the same PUT endpoint
        # performs both add and update.
        request_data = json.dumps(vidm_params)
        if module.check_mode:
            module.exit_json(changed=True, debug_out=str(request_data),
                             id='12345')
        try:
            (rc, resp) = request(vidm_api_url, data=request_data,
                                 headers=headers, method='PUT',
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
        except Exception as err:
            module.fail_json(
                msg="Failed to register vIDM. Request body [%s]. Error[%s]."
                    % (request_data, to_native(err)))
        # Block until the node reports vIDM as running/registered.
        wait_till_create(module, vidm_api_url, mgr_username, mgr_password,
                         validate_certs)
        module.exit_json(changed=True, id=resp["client_id"], body=str(resp),
                         message="vIDM with client id %s registered."
                                 % resp["client_id"])

    elif state == 'absent':
        if module.check_mode:
            module.exit_json(changed=True,
                             debug_out=str(json.dumps(vidm_params)),
                             id=vidm_params['client_id'])
        if existing_vidm is None:
            module.exit_json(changed=False, id=vidm_params['client_id'],
                             message="vIDM with client id %s was not registered."
                                     % vidm_params['client_id'])
        # vIDM with given client_id is registered, so disable it. The same
        # PUT endpoint is used with vidm_enable=False.
        vidm_params['vidm_enable'] = False
        request_data = json.dumps(vidm_params)
        try:
            (rc, resp) = request(vidm_api_url, data=request_data,
                                 headers=headers, method='PUT',
                                 url_username=mgr_username,
                                 url_password=mgr_password,
                                 validate_certs=validate_certs,
                                 ignore_errors=True)
        except Exception as err:
            module.fail_json(
                msg="Failed to un-register vIDM. Request body [%s]. Error[%s]."
                    % (request_data, to_native(err)))
        wait_till_delete(module, vidm_api_url, mgr_username, mgr_password,
                         validate_certs)
        module.exit_json(changed=True, id=vidm_params['client_id'],
                         body=str(resp),
                         message="vIDM with id %s is unregistered."
                                 % vidm_params['client_id'])


if __name__ == '__main__':
    main()
def get_virtual_ip_params(args=None):
    """Strip connection/control arguments and None-valued entries, leaving
    only the virtual IP payload (virtual_ip_address / virtual_ip6_address).
    Mutates and returns *args*."""
    args_to_remove = ['state', 'action', 'username', 'password', 'port',
                      'hostname', 'validate_certs']
    for key in args_to_remove:
        args.pop(key, None)
    for key, value in args.copy().items():
        if value is None:  # identity check, not '== None'
            args.pop(key, None)
    return args


def check_for_update(module, manager_url, mgr_username, mgr_password,
                     validate_certs, vip_params):
    """Return True if any requested virtual IP differs from what is
    currently configured on the cluster.

    Bug fix: when both IPv4 and IPv6 addresses were supplied, the original
    required BOTH to differ ('and'), so changing only one of the two was
    wrongly reported as "already set". Any single difference must trigger
    an update.
    """
    if module.params['virtual_ip_address']:
        existing_vip = get_attribute_from_endpoint(
            module, manager_url, '/cluster/api-virtual-ip', mgr_username,
            mgr_password, validate_certs, 'ip_address')
        if existing_vip != vip_params['virtual_ip_address']:
            return True
    if module.params['virtual_ip6_address']:
        existing_vip6 = get_attribute_from_endpoint(
            module, manager_url, '/cluster/api-virtual-ip', mgr_username,
            mgr_password, validate_certs, 'ip6_address')
        if existing_vip6 != vip_params['virtual_ip6_address']:
            return True
    return False


def _clear_virtual_ip(module, manager_url, headers, mgr_username,
                      mgr_password, validate_certs, action, attribute,
                      unset_value, label, unset_placeholder):
    """Clear one cluster virtual IP address (IPv4 or IPv6).

    The IPv4 and IPv6 flows are identical except for the API action, the
    attribute read back, the sentinel that means "not set" (0.0.0.0 / ::)
    and the message wording; this helper carries those as parameters so the
    original duplicated branches collapse into one. Always exits the module.
    """
    is_set = True
    address = get_attribute_from_endpoint(module, manager_url,
                                          '/cluster/api-virtual-ip',
                                          mgr_username, mgr_password,
                                          validate_certs, attribute)
    if address is None or address == unset_value:
        address = unset_placeholder
        is_set = False
    if module.check_mode:
        if not is_set:
            module.exit_json(changed=True,
                             debug_out='Virtual %s address is not set' % label,
                             id=address)
        module.exit_json(changed=True,
                         debug_out='Virtual %s address is set to %s.'
                                   ' Will be removed.' % (label, address),
                         id=address)
    time.sleep(5)
    if not is_set:
        module.exit_json(changed=False,
                         object_name='Virtual %s was not set before.' % label,
                         message='Cleared cluster virtual %s address.' % label)
    try:
        (rc, resp) = request(manager_url +
                             '/cluster/api-virtual-ip?action=' + action,
                             data='', headers=headers, method='POST',
                             url_username=mgr_username,
                             url_password=mgr_password,
                             validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Failed to clear virtual %s address. Error[%s].'
                             % (label, to_native(err)))
    module.exit_json(changed=True, object_name=address,
                     message='Cleared cluster virtual %s address.' % label)


def main():
    """Set (state=present) or clear (state=absent) the NSX cluster virtual
    IPv4/IPv6 address via /cluster/api-virtual-ip."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(virtual_ip_address=dict(type='str'),
                         virtual_ip6_address=dict(type='str'),
                         action=dict(required_if=[('state', 'absent')],
                                     type='str',
                                     choices=['clear_virtual_ip',
                                              'clear_virtual_ip6']),
                         state=dict(required=True,
                                    choices=['present', 'absent']))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=[('virtual_ip_address',
                                             'virtual_ip6_address')])
    virtual_ip_params = get_virtual_ip_params(module.params.copy())
    state = module.params['state']
    action = module.params['action']
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    headers = {'Accept': 'application/json',
               'Content-Type': 'application/json'}

    if state == 'present':
        # Validate each requested address and build the set_virtual_ip query
        # once, instead of three near-identical request() calls.
        has_v4 = bool(module.params['virtual_ip_address'])
        has_v6 = bool(module.params['virtual_ip6_address'])
        query_parts = []
        if has_v4:
            virtual_ip_address = virtual_ip_params['virtual_ip_address']
            if not check_if_valid_ip(virtual_ip_address):
                module.fail_json(msg="Virtual IP provided is invalid.")
            query_parts.append('ip_address=%s' % virtual_ip_address)
        if has_v6:
            virtual_ip6_address = virtual_ip_params['virtual_ip6_address']
            if not check_if_valid_ip(virtual_ip6_address):
                module.fail_json(msg="Virtual IP provided is invalid.")
            query_parts.append('ip6_address=%s' % virtual_ip6_address)
        if module.check_mode:
            module.exit_json(changed=False,
                             debug_out="Cluster virtual IP would have been"
                                       " updated to %s" % virtual_ip_params,
                             id='12345')
        updated = check_for_update(module, manager_url, mgr_username,
                                   mgr_password, validate_certs,
                                   virtual_ip_params)
        if not updated:
            if has_v4 and has_v6:
                module.exit_json(changed=False,
                                 message="Virtual IPs %s and %s are already"
                                         " set." % (virtual_ip_address,
                                                    virtual_ip6_address))
            elif has_v4:
                module.exit_json(changed=False,
                                 message="Virtual IP %s already set."
                                         % virtual_ip_address)
            module.exit_json(changed=False,
                             message="Virtual IP %s already set."
                                     % virtual_ip6_address)
        try:
            (rc, resp) = request(
                manager_url + '/cluster/api-virtual-ip?action=set_virtual_ip&'
                + '&'.join(query_parts),
                data='', headers=headers, method='POST',
                url_username=mgr_username, url_password=mgr_password,
                validate_certs=validate_certs, ignore_errors=True)
        except Exception as err:
            module.fail_json(msg="Failed to add virtual IP address. Error[%s]."
                                 % to_native(err))
        # Give the cluster a moment to apply the new VIP before reporting.
        time.sleep(5)
        if has_v4 and has_v6:
            module.exit_json(changed=True, result=resp,
                             message="Virtual IP address is set with ip"
                                     " addresses: %s and %s "
                                     % (virtual_ip_address,
                                        virtual_ip6_address))
        elif has_v4:
            module.exit_json(changed=True, result=resp,
                             message="Virtual IP address is set with IPv4"
                                     " address: %s " % virtual_ip_address)
        module.exit_json(changed=True, result=resp,
                         message="Virtual IP address is set with IPv6"
                                 " address: %s " % virtual_ip6_address)

    elif state == 'absent':
        if action == "clear_virtual_ip":
            _clear_virtual_ip(module, manager_url, headers, mgr_username,
                              mgr_password, validate_certs,
                              action='clear_virtual_ip',
                              attribute='ip_address',
                              unset_value='0.0.0.0', label='IPv4',
                              unset_placeholder="Virtual IP address is not set")
        if action == "clear_virtual_ip6":
            _clear_virtual_ip(module, manager_url, headers, mgr_username,
                              mgr_password, validate_certs,
                              action='clear_virtual_ip6',
                              attribute='ip6_address',
                              unset_value='::', label='IPv6',
                              unset_placeholder="Virtual IPv6 address is not set")


if __name__ == '__main__':
    main()
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_vitual_ip_facts short_description: Read cluster virtual IP address description: Returns the configured cluster virtual IP address or null if not configured. version_added: "2.7" author: Rahul Raghuvanshi options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. required: true type: str password: description: The password to authenticate with the NSX manager. 
def main():
    """Read-only facts module: fetch the cluster virtual IP configuration
    from GET /cluster/api-virtual-ip and return the response body as facts.

    Never reports a change (the original carried a constant ``changed``
    local that was always False; it is folded into the exit call).
    """
    argument_spec = vmware_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    mgr_hostname = module.params['hostname']
    mgr_username = module.params['username']
    mgr_password = module.params['password']
    validate_certs = module.params['validate_certs']
    manager_url = 'https://{}/api/v1'.format(mgr_hostname)
    try:
        (rc, resp) = request(manager_url + '/cluster/api-virtual-ip',
                             headers=dict(Accept='application/json'),
                             url_username=mgr_username,
                             url_password=mgr_password,
                             validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing virtual IP information. Error [%s]' % (to_native(err)))
    # Expose the endpoint's fields (ip_address, ip6_address, ...) as facts.
    module.exit_json(changed=False, **resp)


if __name__ == '__main__':
    main()
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nsxt_vm_tags short_description: Update tags on NSXT VM description: Update tags on NSXT VM version_added: "2.8" author: Gautam Verma options: hostname: description: Deployed NSX manager hostname. required: true type: str username: description: The username to authenticate with the NSX manager. type: str password: description: - The password to authenticate with the NSX manager. - Must be specified if username is specified type: str validate_certs: description: Enable server certificate verification. type: bool default: False ca_path: description: Path to the CA bundle to be used to verify host's SSL certificate type: str nsx_cert_path: description: Path to the certificate created for the Principal Identity using which the CRUD operations should be performed type: str nsx_key_path: description: - Path to the certificate key created for the Principal Identity using which the CRUD operations should be performed - Must be specified if nsx_cert_path is specified type: str request_headers: description: HTTP request headers to be sent to the host while making any request type: dict add_tags: type: list element: dict description: List of tags to be applied to the virtual machine suboptions: scope: description: Tag scope. 
default: "" type: str tag: description: Tag value. default: "" type: str remove_tags_with_scope: type: list element: str description: - Specify the scope of the tags that should be removed - If remove_other_tags is True, this becomes do not care virtual_machine_id: description: The identifier that is used in the enforcement point that uniquely identifies the virtual machine. In case of NSXT it would be the value of the external_id of the virtual machine. type: str virtual_machine_display_name: description: Display name of the VM whose tags are to be updated. Either this or virtual_machine_id must be specified. If both are specified, virtual_machine_id is used type: str remove_other_tags: description: - Remove the tags that are not specified in the add_tags - Caution; If this is True, all tags that are not in add_tags will be removed default: false ''' EXAMPLES = ''' - name: Update Tags on VMs nsxt_vm_tags: hostname: "10.10.10.10" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: False virtual_machine_display_name: App-VM-1 remove_other_tags: False add_tags: - scope: my-scope tag: my-tag remove_tags_with_scope: - my-scope1 ''' RETURN = '''# ''' from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.policy_communicator import PolicyCommunicator from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_resource_urls import VM_LIST_URL, VM_UPDATE_URL from ansible.module_utils._text import to_native def get_resource_spec(): vm_tag_spec = PolicyCommunicator.get_vmware_argument_spec() vm_tag_spec.update(dict( virtual_machine_id=dict(type='str'), virtual_machine_display_name=dict(type='str'), add_tags=dict(type='list', elements='dict', default=[], options=dict( scope=dict(required=False, type='str', default=""), tag=dict(required=False, type='str', default=""))), remove_other_tags=dict( required=False, default=False, 
type='bool')), remove_tags_with_scope=dict(type='list', elements='str', default=[])) return vm_tag_spec class TagElement(object): def __init__(self, tag): self.scope, self.tag = tag['scope'], tag.get('tag') self.element = (self.scope, self.tag) def __eq__(self, other): if self.tag is None: return self.scope == other.scope return self.element == other.element def __hash__(self): return hash(self.element) def _fetch_all_tags_on_vm_and_infer_id( vm_id, policy_communicator, vm_display_name, module): target_vm = None if vm_id: _, vms = policy_communicator.request(VM_LIST_URL+'?external_id='+vm_id, base_url='fabric') if vms['result_count'] == 0: module.fail_json(msg="No VM found with the provided " "virtual_machine_id") elif vms['result_count'] == 1: return vms['results'][0].get('tags', []), vm_id else: # Multiple VMs with same external id name. # This should not happen. module.fail_json(msg="Multiple VMs with same external " "id. Please investigate the environment.") else: _, vms = policy_communicator.request(VM_LIST_URL+'?display_name='+vm_display_name, base_url='fabric') if vms['result_count'] == 0: module.fail_json(msg="No VM found with the provided " "virtual_machine_display_name") elif vms['result_count'] == 1: return vms['results'][0].get('tags', []), vms['results'][0]['external_id'] else: # Multiple VMs with same display name. Ask user # to provide VM ID instead module.fail_json(msg="Multiple VMs with same display " "name. 
Please provide " "virtual_machine_id to identify the " "target VM") def _get_tags_as_set(tags=[], scope_list=[]): tag_set = set() if tags: for tag in tags: if tag['scope'] is None: tag['scope'] = '' tag_set.add(TagElement(tag)) if scope_list: for scope in scope_list: tag_set.add(TagElement({'scope': scope, 'tag': ""})) return tag_set def _read_tags_from_module_params(module_params, tag_identifier): return module_params[tag_identifier] or [] def realize(): module = AnsibleModule( argument_spec=get_resource_spec(), supports_check_mode=False) virtual_machine_id = module.params['virtual_machine_id'] virtual_machine_display_name = None if not virtual_machine_id: virtual_machine_display_name = module.params[ 'virtual_machine_display_name'] if not virtual_machine_display_name: module.fail_json(msg="Please specify either virtual_machine_id or " "virtual_machine_display_name in the " "playbook") mgr_hostname = module.params.pop('hostname') mgr_username = module.params.pop('username') mgr_password = module.params.pop('password') nsx_cert_path = module.params['nsx_cert_path'] nsx_key_path = module.params['nsx_key_path'] request_headers = module.params['request_headers'] ca_path = module.params['ca_path'] validate_certs = module.params.pop('validate_certs') try: # Each manager has an associated PolicyCommunicator policy_communicator = PolicyCommunicator.get_instance( mgr_hostname, mgr_username, mgr_password, nsx_cert_path, nsx_key_path, request_headers, ca_path, validate_certs) all_tags, virtual_machine_id = _fetch_all_tags_on_vm_and_infer_id( virtual_machine_id, policy_communicator, virtual_machine_display_name, module) init_tags_set = _get_tags_as_set(tags=all_tags) if module.params.get('remove_other_tags'): tags_to_add = _get_tags_as_set(tags=_read_tags_from_module_params( module.params, 'add_tags')) for i, tag in enumerate(all_tags): if TagElement(tag) not in tags_to_add: all_tags[i] = None elif _read_tags_from_module_params( module.params, 'remove_tags_with_scope'): 
tags_to_remove = _get_tags_as_set( scope_list=_read_tags_from_module_params( module.params, 'remove_tags_with_scope')) for i, tag in enumerate(all_tags): if TagElement(tag) in tags_to_remove: all_tags[i] = None final_tags = [tag for tag in all_tags if tag is not None] final_tags_set = _get_tags_as_set(tags=final_tags) for tag in _read_tags_from_module_params(module.params, 'add_tags'): tag_element = TagElement(tag) if tag_element not in final_tags_set: final_tags += tag, final_tags_set.add(tag_element) if init_tags_set == final_tags_set: module.exit_json(msg="No tags detected to update") post_body = { "external_id": virtual_machine_id, "tags": final_tags } policy_communicator.request( VM_UPDATE_URL + '?action=update_tags', data=post_body, method="POST", base_url='fabric') module.exit_json(msg="Successfully updated tags on VM {}".format( virtual_machine_id), changed=True) except Exception as err: module.fail_json(msg="Failed to update tags on VM {} as API " "returned error: {}. Please try " "again".format(virtual_machine_id, err)) if __name__ == '__main__': realize() ================================================ FILE: tests/playbooks/mp/answerfile.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- hostname: "10.192.254.207" username: "admin" password: "Admin!23Admin" validate_certs: False ip_pools: - display_name: IPPool-IPV4-1 subnets: - allocation_ranges: - start: "10.112.201.28" end: "10.112.201.35" cidr: "10.112.201.0/24" transportzones: - display_name: "TZ1" tz_type: "VLAN_BACKED" # - display_name: "TZ2" # tz_type: "OVERLAY_BACKED" uplink_profiles: - display_name: uplinkProfile1 teaming: active_list: - uplink_name: "uplink-1" uplink_type: PNIC policy: FAILOVER_ORDER transport_vlan: 0 # - display_name: uplinkProfile2 # teaming: # active_list: # - uplink_name: "uplink-2" # uplink_type: PNIC # policy: FAILOVER_ORDER # transport_vlan: 0 host_transport_nodes: - display_name: TN1 host_switches: - host_switch_profiles: - name: uplinkProfile1 type: UplinkHostSwitchProfile host_switch_name: hostswitch1 host_switch_mode: STANDARD pnics: - device_name: "vmnic1" uplink_name: "uplink-1" ip_assignment_spec: resource_type: StaticIpPoolSpec ip_pool_name: "IPPool-IPV4-1" transport_zone_endpoints: - transport_zone_name: "TZ1" transport_zone_endpoints: - transport_zone_name: "TZ1" node_deployment_info: resource_type: "HostNode" display_name: "Host_1" ip_addresses: ["10.161.136.35"] os_type: "ESXI" os_version: "6.5.0" host_credential: username: "root" password: "ca$hc0w" edge_transport_nodes: - display_name: EdgeTN1 host_switches: - host_switch_profiles: - name: nsx-edge-single-nic-uplink-profile type: UplinkHostSwitchProfile host_switch_name: nsxHostSwitch host_switch_mode: STANDARD pnics: - device_name: "fp-eth0" uplink_name: "uplink-1" ip_assignment_spec: resource_type: StaticIpListSpec ip_list: ["192.168.32.1"] subnet_mask: "255.255.255.254" default_gateway: "192.168.32.0" transport_zone_endpoints: - transport_zone_name: "nsx-overlay-transportzone" node_deployment_info: resource_type: "EdgeNode" display_name: "EdgeTN1" deployment_type: "VIRTUAL_MACHINE" deployment_config: form_factor: "SMALL" 
node_user_settings: cli_username: "admin" cli_password: "Admin!23Admin" root_password: "Admin!23Admin" vm_deployment_config: ipv4_assignment_enabled: false ipv6_assignment_type: "STATIC" management_port_subnets: - ip_addresses: - "2002:0:0:0:0:0:0:10" prefix_length: 64 default_gateway_addresses: - "2002:0:0:0:0:0:0:10" placement_type: VsphereDeploymentConfig vc_name: "VC" vc_username: "administrator@vsphere.local" vc_password: "VMware$123" data_networks: - VM Network - VM Network - VM Network management_network: "VM Network" compute: "HostCluster1" storage: "datastore-esx67-4" host: "10.105.15.236" node_settings: allow_ssh_root_login: true enable_ssh: true hostname: "edge.vmware.com" transport_node_profiles: - display_name: TNP1 host_switches: - host_switch_profiles: - name: uplinkProfile1 type: UplinkHostSwitchProfile host_switch_name: hostswitch1 host_switch_mode: STANDARD pnics: - device_name: vmnic1 uplink_name: "uplink-1" ip_assignment_spec: resource_type: StaticIpPoolSpec ip_pool_name: "IPPool-IPV4-1" transport_zone_endpoints: - transport_zone_name: "TZ1" compute_managers: - display_name: "VC1" server: "10.161.129.87" origin_type: vCenter credential_type: UsernamePasswordLoginCredential username: "administrator@vsphere.local" password: "Admin!23" thumbprint: "46:1E:31:DD:0B:37:4C:F0:91:5B:49:A1:A1:94:B5:DF:82:93:90:52:D9:68:0F:86:C8:CA:6C:34:CB:82:D7:D5" route_advertise: display_name: "tier-1" # Must be a tier 1 router name enabled: True advertise_dns_forwarder: False advertise_lb_snat_ip: True advertise_lb_vip: True advertise_nat_routes: True advertise_nsx_connected_routes: True advertise_static_routes: True # # vIDM properties # vidm: client_id: "NSX_client_credentials_client_ID" client_secret: "NSX_client_credentials_client_SECRET" host_name: "colo-vshield3-dhcp168.eng.vmware.com" lb_enable: False node_host_name: "10.186.17.88" thumbprint: "BF838E7A1CF7B84F7B556F35E0D9A0A365F6AE021885809A2D42AAF78A06B0A4" # # One or more local managers that have to be 
registered with NSX # local_managers: - display_name: "My LM1" id: "LM1" fqdn: "10.186.3.163" username: "admin" password: "Admin!23Admin" thumbprint: "ec8ba0322b987bc7bce14097667104f43340befce84d75e0820b02a2f3839441" - id: "LM2" display_name: "" fqdn: "10.182.6.97" username: "admin" password: "Admin!23Admin" thumbprint: "d2ec50124fcd487c37d46f85d8f6f80b24441f4e5ed9cac5c8c47d2649043078" # # One or more global managers that have to be registered with current global manager # global_managers: - display_name: "10.92.78.184" fqdn: "10.92.78.184" username: "admin" password: "Admin!23Admin" thumbprint: "703e69dce489beaa1a683d12a16efbf9c81630d23c5f32fc25becc5340169f1e" mode: "STANDBY" ================================================ FILE: tests/playbooks/mp/answerfile_9x.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- hostname: "10.192.254.207" username: "admin" password: "Admin!23Admin" validate_certs: False ip_pools: - display_name: IPPool-IPV4-1 pool_static_subnets: - display_name: test-ip-static-subnet-1 state: present allocation_ranges: - start: "10.112.201.28" end: "10.112.201.35" cidr: "10.112.201.0/24" transportzones: - display_name: "TZ1" tz_type: "VLAN_BACKED" # - display_name: "TZ2" # tz_type: "OVERLAY_BACKED" uplink_profiles: - display_name: uplinkProfile1 teaming: active_list: - uplink_name: "uplink-1" uplink_type: PNIC policy: FAILOVER_ORDER transport_vlan: 0 # - display_name: uplinkProfile2 # teaming: # active_list: # - uplink_name: "uplink-2" # uplink_type: PNIC # policy: FAILOVER_ORDER # transport_vlan: 0 host_transport_nodes: - display_name: TN1 resource_type: "HostTransportNode" host_switches: - host_switch_profiles: - name: uplinkProfile1 type: UplinkHostSwitchProfile host_switch_name: hostswitch1 host_switch_mode: ENS_INTERRUPT # host_switch_mode: STANDARD host_switch_type: VDS uplinks: - vds_uplink_name: Uplink 1 uplink_name: uplink-1 ip_assignment_spec: 
resource_type: StaticIpPoolSpec ip_pool_name: "IPPool-IPV4-1" transport_zone_endpoints: - transport_zone_name: "TZ1" node_deployment_info: resource_type: "HostNode" display_name: "Host_1" ip_addresses: ["10.161.136.35"] os_type: "ESXI" os_version: "9.0.0" host_credential: username: "root" password: "ca$hc0w" edge_transport_nodes: - display_name: EdgeTN1 host_switches: - host_switch_profiles: - name: nsx-edge-single-nic-uplink-profile type: UplinkHostSwitchProfile host_switch_name: nsxHostSwitch host_switch_mode: STANDARD pnics: - device_name: "fp-eth0" uplink_name: "uplink-1" ip_assignment_spec: resource_type: StaticIpListSpec ip_list: ["192.168.32.1"] subnet_mask: "255.255.255.254" default_gateway: "192.168.32.0" transport_zone_endpoints: - transport_zone_name: "nsx-overlay-transportzone" node_deployment_info: resource_type: "EdgeNode" display_name: "EdgeTN1" deployment_type: "VIRTUAL_MACHINE" deployment_config: form_factor: "SMALL" node_user_settings: cli_username: "admin" cli_password: "Admin!23Admin" root_password: "Admin!23Admin" vm_deployment_config: ipv4_assignment_enabled: false ipv6_assignment_type: "STATIC" management_port_subnets: - ip_addresses: - "2002:0:0:0:0:0:0:10" prefix_length: 64 default_gateway_addresses: - "2002:0:0:0:0:0:0:10" placement_type: VsphereDeploymentConfig vc_name: "VC" vc_username: "administrator@vsphere.local" vc_password: "VMware$123" data_networks: - VM Network - VM Network - VM Network management_network: "VM Network" compute: "HostCluster1" storage: "datastore-esx67-4" host: "10.105.15.236" node_settings: allow_ssh_root_login: true enable_ssh: true hostname: "edge.vmware.com" transport_node_profiles: - display_name: TNP1 host_switches: - host_switch_profiles: - name: uplinkProfile1 type: UplinkHostSwitchProfile # host_switch_name: hostswitch1 host_switch_id: "50 17 4c 3f 86 bf 44 be-11 6f 69 80 90 f7 22 21" host_switch_mode: STANDARD host_switch_type: VDS uplinks: - vds_uplink_name: Uplink 1 uplink_name: uplink-1 
ip_assignment_spec: resource_type: StaticIpPoolSpec ip_pool_name: "IPPool-IPV4-1" transport_zone_endpoints: - transport_zone_name: "TZ1" compute_managers: - display_name: "VC1" server: "10.161.129.87" origin_type: vCenter credential_type: UsernamePasswordLoginCredential username: "administrator@vsphere.local" password: "Admin!23" thumbprint: "46:1E:31:DD:0B:37:4C:F0:91:5B:49:A1:A1:94:B5:DF:82:93:90:52:D9:68:0F:86:C8:CA:6C:34:CB:82:D7:D5" route_advertise: display_name: "tier-1" # Must be a tier 1 router name enabled: True advertise_dns_forwarder: False advertise_lb_snat_ip: True advertise_lb_vip: True advertise_nat_routes: True advertise_nsx_connected_routes: True advertise_static_routes: True # # vIDM properties # vidm: client_id: "NSX_client_credentials_client_ID" client_secret: "NSX_client_credentials_client_SECRET" host_name: "colo-vshield3-dhcp168.eng.vmware.com" lb_enable: False node_host_name: "10.186.17.88" thumbprint: "BF838E7A1CF7B84F7B556F35E0D9A0A365F6AE021885809A2D42AAF78A06B0A4" # # One or more local managers that have to be registered with NSX # local_managers: - display_name: "My LM1" id: "LM1" fqdn: "10.186.3.163" username: "admin" password: "Admin!23Admin" thumbprint: "ec8ba0322b987bc7bce14097667104f43340befce84d75e0820b02a2f3839441" - id: "LM2" display_name: "" fqdn: "10.182.6.97" username: "admin" password: "Admin!23Admin" thumbprint: "d2ec50124fcd487c37d46f85d8f6f80b24441f4e5ed9cac5c8c47d2649043078" # # One or more global managers that have to be registered with current global manager # global_managers: - display_name: "10.92.78.184" fqdn: "10.92.78.184" username: "admin" password: "Admin!23Admin" thumbprint: "703e69dce489beaa1a683d12a16efbf9c81630d23c5f32fc25becc5340169f1e" mode: "STANDBY" ================================================ FILE: tests/playbooks/mp/answerfile_attach_tnp_to_cluster.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- hostname: "10.161.157.200" username: "admin" password: "Admin!23Admin" validate_certs: False ip_pools: - display_name: IPPool-IPV4-1 subnets: - allocation_ranges: - start: "10.112.201.28" end: "10.112.201.35" cidr: "10.112.201.0/24" transportzones: - display_name: "TZ1" transport_type: "OVERLAY" host_switch_name: "hostswitch1" uplink_profiles: - display_name: uplinkProfile1 teaming: active_list: - uplink_name: "uplink-1" uplink_type: PNIC policy: FAILOVER_ORDER transport_vlan: 0 transport_node_profiles: - display_name: TNP1 host_switches: - host_switch_profiles: - name: uplinkProfile1 type: UplinkHostSwitchProfile host_switch_name: hostswitch1 pnics: - device_name: vmnic1 uplink_name: "uplink-1" ip_assignment_spec: resource_type: StaticIpPoolSpec ip_pool_name: "IPPool-IPV4-1" transport_zone_endpoints: - transport_zone_name: "TZ1" compute_managers: - display_name: "VC1" server: "10.161.129.87" origin_type: vCenter credential_type: UsernamePasswordLoginCredential username: "administrator@vsphere.local" password: "Admin!23" thumbprint: "46:1E:31:DD:0B:37:4C:F0:91:5B:49:A1:A1:94:B5:DF:82:93:90:52:D9:68:0F:86:C8:CA:6C:34:CB:82:D7:D5" ================================================ FILE: tests/playbooks/mp/answerfile_attach_tnp_to_cluster_9x.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- hostname: "10.161.157.200" username: "admin" password: "Admin!23Admin" validate_certs: False ip_pools: - display_name: IPPool-IPV4-1 pool_static_subnets: - display_name: test-ip-static-subnet-1 state: present allocation_ranges: - start: "10.112.201.28" end: "10.112.201.35" cidr: "10.112.201.0/24" transportzones: - display_name: "TZ1" tz_type: "OVERLAY_BACKED" uplink_profiles: - display_name: uplinkProfile1 teaming: active_list: - uplink_name: "uplink-1" uplink_type: PNIC policy: FAILOVER_ORDER transport_vlan: 0 transport_node_profiles: - display_name: TNP1 host_switches: - host_switch_profiles: - name: uplinkProfile1 type: UplinkHostSwitchProfile host_switch_name: hostswitch1 host_switch_type: VDS uplinks: - vds_uplink_name: Uplink 1 uplink_name: uplink-1 ip_assignment_spec: resource_type: StaticIpPoolSpec ip_pool_name: "IPPool-IPV4-1" transport_zone_endpoints: - transport_zone_name: "TZ1" compute_managers: - display_name: "VC1" server: "10.161.129.87" origin_type: vCenter credential_type: UsernamePasswordLoginCredential username: "administrator@vsphere.local" password: "Admin!23" thumbprint: "46:1E:31:DD:0B:37:4C:F0:91:5B:49:A1:A1:94:B5:DF:82:93:90:52:D9:68:0F:86:C8:CA:6C:34:CB:82:D7:D5" ================================================ FILE: tests/playbooks/mp/answerfile_tn.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- hostname: "10.161.157.200" username: "admin" password: "Admin!23Admin" validate_certs: False ip_pools: - display_name: IPPool-IPV4-1 subnets: - allocation_ranges: - start: "10.112.201.28" end: "10.112.201.35" cidr: "10.112.201.0/24" transportzones: - display_name: "TZ1" transport_type: "OVERLAY" host_switch_name: "hostswitch1" uplink_profiles: - display_name: uplinkProfile1 teaming: active_list: - uplink_name: "uplink-1" uplink_type: PNIC policy: FAILOVER_ORDER transport_vlan: 0 transport_nodes: - display_name: TN1 host_switches: - host_switch_profiles: - name: uplinkProfile1 type: UplinkHostSwitchProfile host_switch_name: hostswitch1 # pnics: # - device_name: "vmnic1" # uplink_name: "uplink-1" ip_assignment_spec: resource_type: StaticIpPoolSpec ip_pool_name: "IPPool-IPV4-1" transport_zone_endpoints: - transport_zone_name: "TZ1" node_deployment_info: # Host node deployment info ESXI # resource_type: "HostNode" # display_name: "Host_1" # ip_addresses: ["10.161.136.35"] # os_type: "ESXI" # os_version: "6.5.0" # host_credential: # username: "root" # password: "ca$hc0w" # thumbprint: "aba87c8b3a042435e0f3d60784c1fbc6f1aba5ce71f6efb2601fd26fb5453bb0" resource_type: "HostNode" display_name: "Host_3" ip_addresses: ["10.192.44.93"] os_type: "UBUNTUKVM" os_version: "16.04" host_credential: username: "root" password: "ca$hc0w" thumbprint: "3fS+Ik4O0GOMuQ8Chbxfn7KBLjmLhhnEHPKDXnt/AFQ=" ================================================ FILE: tests/playbooks/mp/answerfile_tn_9x.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- hostname: "10.161.157.200" username: "admin" password: "Admin!23Admin" validate_certs: False ip_pools: - display_name: IPPool-IPV4-1 pool_static_subnets: - display_name: test-ip-static-subnet-1 state: present allocation_ranges: - start: "10.112.201.28" end: "10.112.201.35" cidr: "10.112.201.0/24" transportzones: - display_name: "TZ1" tz_type: "OVERLAY_BACKED" uplink_profiles: - display_name: uplinkProfile1 teaming: active_list: - uplink_name: "uplink-1" uplink_type: PNIC policy: FAILOVER_ORDER transport_vlan: 0 transport_nodes: - display_name: TN1 host_switches: - host_switch_profiles: - name: uplinkProfile1 type: UplinkHostSwitchProfile host_switch_name: hostswitch1 host_switch_type: VDS host_switch_mode: ENS_INTERRUPT uplinks: - vds_uplink_name: Uplink 1 uplink_name: uplink-1 ip_assignment_spec: resource_type: StaticIpPoolSpec ip_pool_name: "IPPool-IPV4-1" transport_zone_endpoints: - transport_zone_name: "TZ1" node_deployment_info: # Host node deployment info ESXI resource_type: "HostNode" display_name: "Host_1" ip_addresses: ["10.161.96.247"] os_type: "ESXI" os_version: "9.0.0" host_credential: username: "root" password: "ca$hc0w" thumbprint: "85:03:6A:33:CA:AF:EE:24:67:C0:02:F5:B0:77:AE:DD:FC:31:19:93:44:77:76:C9:E6:4B:61:4D:9A:9E:72:98" ================================================ FILE: tests/playbooks/mp/test_attach_tnp_to_cluster.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile_attach_tnp_to_cluster.yml tasks: - name: Register compute manager vmware.ansible_for_nsxt.nsxt_fabric_compute_managers: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" server: "{{item.server}}" origin_type: "{{item.origin_type}}" credential: credential_type: "{{item.credential_type}}" username: "{{item.username}}" password: "{{item.password}}" thumbprint: "{{item.thumbprint}}" state: present with_items: - "{{compute_managers}}" - name: Create ip pool vmware.ansible_for_nsxt.nsxt_ip_pools: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" subnets: "{{item.subnets}}" state: present with_items: - "{{ip_pools}}" - name: Create transport zone vmware.ansible_for_nsxt.nsxt_transport_zones: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: "TransportZone" display_name: "{{item.display_name}}" description: "NSX configured Test Transport Zone" transport_type: "{{item.transport_type}}" host_switch_name: "{{item.host_switch_name}}" state: "present" with_items: - "{{transportzones}}" - name: Create uplink profile vmware.ansible_for_nsxt.nsxt_uplink_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: UplinkHostSwitchProfile display_name: "{{item.display_name}}" mtu: 1600 teaming: "{{item.teaming}}" transport_vlan: "{{item.transport_vlan}}" state: "present" with_items: - "{{uplink_profiles}}" - name: Create transport node profile vmware.ansible_for_nsxt.nsxt_transport_node_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: TransportNodeProfile display_name: "{{item.display_name}}" 
description: NSX configured Test Transport Node Profile host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" transport_zone_endpoints: "{{item.transport_zone_endpoints}}" state: present with_items: - "{{transport_node_profiles}}" - name: Attach Transport node profile to cluster vmware.ansible_for_nsxt.nsxt_transport_node_collections: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "TNC1" resource_type: "TransportNodeCollection" description: "Transport Node Collections 1" compute_manager_name: "VC1" cluster_name: "os-compute-cluster-1" transport_node_profile_name: "TNP1" state: present ================================================ FILE: tests/playbooks/mp/test_attach_tnp_to_cluster_9x.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile_attach_tnp_to_cluster_9x.yml tasks: - name: Register compute manager vmware.ansible_for_nsxt.nsxt_fabric_compute_managers: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" server: "{{item.server}}" origin_type: "{{item.origin_type}}" credential: credential_type: "{{item.credential_type}}" username: "{{item.username}}" password: "{{item.password}}" thumbprint: "{{item.thumbprint}}" state: present with_items: - "{{compute_managers}}" - name: Create ip pool vmware.ansible_for_nsxt.nsxt_policy_ip_pool: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" pool_static_subnets: "{{item.pool_static_subnets}}" state: present with_items: - "{{ip_pools}}" - name: Create transport zone vmware.ansible_for_nsxt.nsxt_transport_zones: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" 
validate_certs: False resource_type: "PolicyTransportZone" display_name: "{{item.display_name}}" description: "NSX configured Test Transport Zone" tz_type: "{{item.tz_type}}" state: "present" with_items: - "{{transportzones}}" - name: Create uplink profile vmware.ansible_for_nsxt.nsxt_uplink_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: UplinkHostSwitchProfile display_name: "{{item.display_name}}" teaming: "{{item.teaming}}" transport_vlan: "{{item.transport_vlan}}" state: "present" with_items: - "{{uplink_profiles}}" - name: Create transport node profile vmware.ansible_for_nsxt.nsxt_transport_node_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: PolicyHostTransportNodeProfile display_name: "{{item.display_name}}" description: NSX configured Test Transport Node Profile host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" state: present with_items: - "{{transport_node_profiles}}" - name: Attach Transport node profile to cluster vmware.ansible_for_nsxt.nsxt_transport_node_collections: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "TNC1" resource_type: "TransportNodeCollection" description: "Transport Node Collections 1" compute_manager_name: "VC1" cluster_name: "os-compute-cluster-1" transport_node_profile_name: "TNP1" state: present ================================================ FILE: tests/playbooks/mp/test_basic_topology.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: deploy NSX Manager OVA vmware.ansible_for_nsxt.nsxt_deploy_ova: ovftool_path: "/usr/bin" datacenter: "private_dc" datastore: "data store" portgroup: "VM Network" cluster: "nsxt_cluster" vmname: "nsxt-manager" hostname: "nsxt-manager-10" dns_server: "20.162.244.213" dns_domain: "eng.vmware.com" ntp_server: "123.110.200.124" gateway: "10.112.203.253" ip_address: "40.112.201.24" netmask: "255.255.224.0" admin_password: "Admin!23Admin" cli_password: "Admin!23Admin" path_to_ova: "http://build-squid.eng.vmware.com/build/mts/release/bora-8411846/publish/nsx-unified-appliance/exports/ovf" ova_file: "nsx-unified-appliance-2.2.0.0.0.8411854.ovf" vcenter: "10.161.244.213" vcenter_user: "administrator@vsphere.local" vcenter_passwd: "Admin!23" deployment_size: "small" role: "nsx-manager nsx-controller" - name: Check manager status vmware.ansible_for_nsxt.nsxt_manager_status: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False wait_time: 50 - name: Deploy compute manager vmware.ansible_for_nsxt.nsxt_fabric_compute_managers: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "vCenter" server: "10.161.244.213" origin_type: vCenter credential: credential_type: UsernamePasswordLoginCredential username: "administrator@vsphere.local" password: "Admin!23" thumbprint: "36:43:34:D9:C2:06:27:4B:EE:C3:4A:AE:23:BF:76:A0:0C:4D:D6:8A:D3:16:55:97:62:07:C2:84:0C:D8:BA:66" state: present register: compute_manager - name: Deploy controller vmware.ansible_for_nsxt.nsxt_controllers: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False deployment_requests: - roles: - CONTROLLER form_factor: "MEDIUM" user_settings: cli_password: "Admin!23Admin" root_password: "Admin!23Admin" deployment_config: 
placement_type: VsphereClusterNodeVMDeploymentConfig vc_id: "{{compute_manager.id}}" management_network_id: "network-44" hostname: "controller-1" compute_id: "domain-c49" storage_id: "datastore-43" default_gateway_addresses: - 11.122.203.253 management_port_subnets: - ip_addresses: - 11.142.201.25 prefix_length: "19" clustering_config: clustering_type: ControlClusteringConfig shared_secret: "123456" join_to_existing_cluster: false state: present - name: Create ip pool vmware.ansible_for_nsxt.nsxt_ip_pools: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" subnets: "{{item.subnets}}" state: present with_items: - "{{ip_pools}}" - name: Create transport zone vmware.ansible_for_nsxt.nsxt_transport_zones: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: "TransportZone" display_name: "{{item.display_name}}" description: "NSX configured Test Transport Zone" transport_type: "{{item.transport_type}}" host_switch_name: "{{item.host_switch_name}}" #zone_id: "21ff0e36-1624-4c18-be2f-070513079185" state: "present" with_items: - "{{transportzones}}" # - name: Create logical switch # nsxt_logical_switches: # hostname: "{{hostname}}" # username: "{{username}}" # password: "{{password}}" # validate_certs: False # display_name: "test_lswitch" # replication_mode: SOURCE # admin_state: UP # transport_zone_id: "{{transport_zone.id}}" # state: "present" # register: logical_switch - name: Create uplink profile vmware.ansible_for_nsxt.nsxt_uplink_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: UplinkHostSwitchProfile display_name: "{{item.display_name}}" mtu: 1600 teaming: "{{item.teaming}}" transport_vlan: "{{item.transport_vlan}}" state: "present" with_items: - "{{uplink_profiles}}" - name: Create transport node vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: 
"{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: TransportNode display_name: "{{item.display_name}}" description: NSX configured Test Transport Node host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" transport_zone_endpoints: "{{item.transport_zone_endpoints}}" fabric_node_name: "{{item.fabric_node_name}}" state: present with_items: - "{{transport_nodes}}" #- debug: var=deploy_nsx_ova ================================================ FILE: tests/playbooks/mp/test_basic_topology_9x.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile_9x.yml tasks: - name: deploy NSX Manager OVA vmware.ansible_for_nsxt.nsxt_deploy_ova: ovftool_path: "/usr/bin" datacenter: "private_dc" datastore: "data store" portgroup: "VM Network" cluster: "nsxt_cluster" vmname: "nsxt-manager" hostname: "nsxt-manager-10" dns_server: "20.162.244.213" dns_domain: "eng.vmware.com" ntp_server: "123.110.200.124" gateway: "10.112.203.253" ip_address: "40.112.201.24" netmask: "255.255.224.0" admin_password: "Admin!23Admin" cli_password: "Admin!23Admin" path_to_ova: "http://build-squid.vcfd.broadcom.net/build/mts/release/bora-24563316/publish/nsx-unified-appliance/exports/ovf" ova_file: "nsx-unified-appliance-9.0.0.0.24562919.ovf" vcenter: "10.162.72.205" vcenter_user: "administrator@vsphere.local" vcenter_passwd: "Admin!23" deployment_size: "small" role: "NSX Manager" # role: "nsx-manager nsx-controller" - name: Check manager status vmware.ansible_for_nsxt.nsxt_manager_status: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False wait_time: 50 - name: Deploy compute manager vmware.ansible_for_nsxt.nsxt_fabric_compute_managers: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" 
validate_certs: False display_name: "VC1" server: "10.162.72.205" origin_type: vCenter credential: credential_type: UsernamePasswordLoginCredential username: "administrator@vsphere.local" password: "Admin!23" thumbprint: "36:43:34:D9:C2:06:27:4B:EE:C3:4A:AE:23:BF:76:A0:0C:4D:D6:8A:D3:16:55:97:62:07:C2:84:0C:D8:BA:66" state: present register: compute_manager # - name: Deploy controller # vmware.ansible_for_nsxt.nsxt_controllers: # hostname: "{{hostname}}" # username: "{{username}}" # password: "{{password}}" # validate_certs: False # deployment_requests: # - roles: # - CONTROLLER # form_factor: "MEDIUM" # user_settings: # cli_password: "Admin!23Admin" # root_password: "Admin!23Admin" # deployment_config: # placement_type: VsphereClusterNodeVMDeploymentConfig # vc_id: "{{compute_manager.id}}" # management_network_id: "network-44" # hostname: "controller-1" # compute_id: "domain-c49" # storage_id: "datastore-43" # default_gateway_addresses: # - 11.122.203.253 # management_port_subnets: # - ip_addresses: # - 11.142.201.25 # prefix_length: "19" # clustering_config: # clustering_type: ControlClusteringConfig # shared_secret: "123456" # join_to_existing_cluster: false # state: present - name: Create ip pool vmware.ansible_for_nsxt.nsxt_policy_ip_pool: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" pool_static_subnets: "{{item.pool_static_subnets}}" state: present with_items: - "{{ip_pools}}" - name: Create transport zone vmware.ansible_for_nsxt.nsxt_transport_zones: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: "TransportZone" display_name: "{{item.display_name}}" description: "NSX configured Test Transport Zone" tz_type: "{{item.tz_type}}" #zone_id: "21ff0e36-1624-4c18-be2f-070513079185" state: "present" with_items: - "{{transportzones}}" # - name: Create logical switch # nsxt_logical_switches: # hostname: 
"{{hostname}}" # username: "{{username}}" # password: "{{password}}" # validate_certs: False # display_name: "test_lswitch" # replication_mode: SOURCE # admin_state: UP # transport_zone_id: "{{transport_zone.id}}" # state: "present" # register: logical_switch - name: Create uplink profile vmware.ansible_for_nsxt.nsxt_uplink_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: UplinkHostSwitchProfile display_name: "{{item.display_name}}" teaming: "{{item.teaming}}" transport_vlan: "{{item.transport_vlan}}" state: "present" with_items: - "{{uplink_profiles}}" - name: Create transport node vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" description: NSX configured Test Transport Node host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" node_deployment_info: "{{item.node_deployment_info}}" state: present with_items: - "{{host_transport_nodes}}" ================================================ FILE: tests/playbooks/mp/test_certificates.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Add a new certificate vmware.ansible_for_nsxt.nsxt_certificates: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "Certificate_file" pem_encoded_file: "/Path/to/certificate/file" #private_key_file: "/Path/to/p12/private/key/file" state: "present" ================================================ FILE: tests/playbooks/mp/test_certificates_facts.yml ================================================ # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List all existing certificates vmware.ansible_for_nsxt.nsxt_certificates_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_cluster_profiles.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Add edge cluster vmware.ansible_for_nsxt.nsxt_cluster_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: EdgeHighAvailabilityProfile display_name: edge-cluster-profile-East description: "Edge cluster profile description" bfd_probe_interval: 1000 bfd_declare_dead_multiple: 3 bfd_allowed_hops: 1 standby_relocation_config: standby_relocation_threshold: 600 state: present ================================================ FILE: tests/playbooks/mp/test_cluster_profiles_facts.yml ================================================ # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List all cluster profiles vmware.ansible_for_nsxt.nsxt_cluster_profiles_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_compute_managers.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Register compute manager vmware.ansible_for_nsxt.nsxt_fabric_compute_managers: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False #compute_manager_id: "25d314b6-97f2-48e2-87b5-f9ce04caf5f8" display_name: "{{item.display_name}}" server: "{{item.server}}" origin_type: "{{item.origin_type}}" credential: credential_type: "{{item.credential_type}}" username: "{{item.username}}" password: "{{item.password}}" thumbprint: "{{item.thumbprint}}" state: present with_items: - "{{compute_managers}}" ================================================ FILE: tests/playbooks/mp/test_compute_managers_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List all compute managers vmware.ansible_for_nsxt.nsxt_fabric_compute_managers_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_configure_transport_node.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile_tn.yml tasks: - name: Create ip pool vmware.ansible_for_nsxt.nsxt_ip_pools: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" subnets: "{{item.subnets}}" state: present with_items: - "{{ip_pools}}" - name: Create transport zone vmware.ansible_for_nsxt.nsxt_transport_zones: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: "TransportZone" display_name: "{{item.display_name}}" description: "NSX configured Test Transport Zone" transport_type: "{{item.transport_type}}" host_switch_name: "{{item.host_switch_name}}" state: "present" with_items: - "{{transportzones}}" - name: Create uplink profile vmware.ansible_for_nsxt.nsxt_uplink_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: UplinkHostSwitchProfile display_name: "{{item.display_name}}" mtu: 1600 teaming: "{{item.teaming}}" transport_vlan: "{{item.transport_vlan}}" state: "present" with_items: - "{{uplink_profiles}}" - name: Create transport node vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" transport_zone_endpoints: "{{item.transport_zone_endpoints}}" node_deployment_info: "{{item.node_deployment_info}}" state: present with_items: - "{{transport_nodes}}" ================================================ FILE: tests/playbooks/mp/test_configure_transport_node_9x.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile_tn_9x.yml tasks: - name: Create ip pool vmware.ansible_for_nsxt.nsxt_policy_ip_pool: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" pool_static_subnets: "{{item.pool_static_subnets}}" state: present with_items: - "{{ip_pools}}" - name: Create transport zone vmware.ansible_for_nsxt.nsxt_transport_zones: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: "TransportZone" display_name: "{{item.display_name}}" description: "NSX configured Test Transport Zone" tz_type: "{{item.tz_type}}" state: "present" with_items: - "{{transportzones}}" - name: Create uplink profile vmware.ansible_for_nsxt.nsxt_uplink_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: UplinkHostSwitchProfile display_name: "{{item.display_name}}" mtu: 1600 teaming: "{{item.teaming}}" transport_vlan: "{{item.transport_vlan}}" state: "present" with_items: - "{{uplink_profiles}}" - name: Create transport node vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" node_deployment_info: "{{item.node_deployment_info}}" state: present with_items: - "{{transport_nodes}}" ================================================ FILE: tests/playbooks/mp/test_edge_clusters.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Add edge cluster vmware.ansible_for_nsxt.nsxt_edge_clusters: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: edge-cluster-1 cluster_profile_bindings: - profile_name: "Profile01" resource_type: EdgeHighAvailabilityProfile members: - transport_node_name: "TN_1" state: present ================================================ FILE: tests/playbooks/mp/test_edge_clusters_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List edge clusters vmware.ansible_for_nsxt.nsxt_edge_clusters_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_global_manager_enable_service.yml ================================================ --- # # Playbook to enable the service on a Global Manager # - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Make the global manager as Active vmware.ansible_for_nsxt.nsxt_global_manager_enable_service: hostname: "{{ global_managers[0].fqdn }}" username: "{{ global_managers[0].username }}" password: "{{ global_managers[0].password }}" validate_certs: "{{ validate_certs }}" id: "{{ global_managers[0].id }}" display_name: "{{ global_managers[0].display_name }}" ================================================ FILE: tests/playbooks/mp/test_global_manager_registration.yml ================================================ --- # # Playbook to register Global Managers with the NSX Appliance # - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: -
name: Register global manager vmware.ansible_for_nsxt.nsxt_global_manager_registration: hostname: "{{ hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: "{{ validate_certs }}" display_name: "{{ item.display_name }}" mode: "{{ item.mode }}" connection_info: fqdn: "{{ item.fqdn }}" username: "{{ item.username }}" password: "{{ item.password }}" thumbprint: "{{ item.thumbprint }}" state: absent with_items: - "{{global_managers}}" ================================================ FILE: tests/playbooks/mp/test_global_managers_active.yml ================================================ --- # # Playbook to make a Global Manager the active Global Manager # - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Make the global manager as Active vmware.ansible_for_nsxt.nsxt_global_manager_active: hostname: "{{ global_managers[0].fqdn }}" username: "{{ global_managers[0].username }}" password: "{{ global_managers[0].password }}" validate_certs: "{{ validate_certs }}" id: "{{ global_managers[0].id }}" ================================================ FILE: tests/playbooks/mp/test_ip_blocks.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create ip block vmware.ansible_for_nsxt.nsxt_ip_blocks: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "MyDisplayname" cidr: "192.168.0.0/16" state: present ================================================ FILE: tests/playbooks/mp/test_ip_blocks_facts.yml ================================================ # Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List IP address block vmware.ansible_for_nsxt.nsxt_ip_blocks_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False ================================================ FILE: tests/playbooks/mp/test_ip_pools.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create ip pool vmware.ansible_for_nsxt.nsxt_ip_pools: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" subnets: "{{item.subnets}}" state: present with_items: - "{{ip_pools}}" ================================================ FILE: tests/playbooks/mp/test_ip_pools_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List IP pools vmware.ansible_for_nsxt.nsxt_ip_pools_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_licenses.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Add license vmware.ansible_for_nsxt.nsxt_licenses: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False license_key: "00000-00000-00000-00000-00000" state: "present" ================================================ FILE: tests/playbooks/mp/test_licenses_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get all licenses vmware.ansible_for_nsxt.nsxt_licenses_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_local_manager_registration.yml ================================================ --- # # Playbook to register a local manager with the global manager # - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Register local manager vmware.ansible_for_nsxt.nsxt_local_manager_registration: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: "{{ validate_certs }}" display_name: "{{ item.display_name }}" id: "{{ item.id }}" site_connection_info: fqdn: "{{ item.fqdn }}" username: "{{ item.username }}" password: "{{ item.password }}" thumbprint: "{{ item.thumbprint }}" state: absent with_items: - "{{local_managers}}" ================================================ FILE: tests/playbooks/mp/test_local_managers_compatibility.yml ================================================ --- # # Playbook to check local manager compatibility with the global manager # - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Checks the compatibility of a local 
manager for registration with a global manager vmware.ansible_for_nsxt.nsxt_local_managers_compatibility: hostname: "{{ hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: "{{ validate_certs }}" site_connection_info: fqdn: "{{ local_managers[0].fqdn }}" username: "{{ local_managers[0].username }}" password: "{{ local_managers[0].password }}" thumbprint: "{{ local_managers[0].thumbprint }}" register: task_output - debug: var: task_output.version_compatible ================================================ FILE: tests/playbooks/mp/test_local_managers_facts.yml ================================================ --- # # Playbook to get facts of local managers registered with a global manager # - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List local managers registered with the global manager vmware.ansible_for_nsxt.nsxt_local_managers_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: "{{ validate_certs }}" ================================================ FILE: tests/playbooks/mp/test_logical_ports.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create a Logical Port vmware.ansible_for_nsxt.nsxt_logical_ports: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "logical_port_1" logical_switch_name: "ls1" attachment: attachment_type: "VIF" id: "vif2" admin_state: "UP" state: "present" ================================================ FILE: tests/playbooks/mp/test_logical_ports_facts.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List logical ports vmware.ansible_for_nsxt.nsxt_logical_ports_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_logical_router_ports.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create logical router port vmware.ansible_for_nsxt.nsxt_logical_router_ports: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "lrouterport-103" resource_type: "LogicalRouterDownLinkPort" logical_router_name: "tier-0" linked_logical_switch_port_id: target_type: "LogicalPort" target_id: "fa535fbd-c01f-4536-86e4-36ee3572b6f3" subnets: - ip_addresses: - 192.168.3.1 prefix_length: 24 state: "present" ================================================ FILE: tests/playbooks/mp/test_logical_router_ports_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List logical router ports vmware.ansible_for_nsxt.nsxt_logical_router_ports_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_logical_router_static_route.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Add Static Routes on a Logical Router vmware.ansible_for_nsxt.nsxt_logical_router_static_routes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "static_route" logical_router_name: "tier-0" next_hops: - administrative_distance: '2' ip_address: 192.168.200.254 network: 192.168.200.0/24 state: "present" ================================================ FILE: tests/playbooks/mp/test_logical_routers.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create logical router vmware.ansible_for_nsxt.nsxt_logical_routers: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "tier-0" edge_cluster_name: "edge-cluster-1" router_type: "TIER0" high_availability_mode: "ACTIVE_ACTIVE" state: "present" failover_mode: "NON_PREEMPTIVE" advanced_config: internal_transit_network: "169.254.0.0/28" ha_vip_configs: - enabled: False ha_vip_subnets: - active_vip_addresses: [ "12.12.4.4" ] prefix_length: "22" redundant_uplink_port_ids: [ "Uplink-1","Uplink-2" ] - enabled: False ha_vip_subnets: - active_vip_addresses: [ "12.12.4.5" ] prefix_length: "22" redundant_uplink_port_names: [ "Uplink-3","Uplink-4" ] redundant_uplink_port_ids: ["z", "y"] tags: - scope: "Scope1" tag: "Tag1" ================================================ FILE: tests/playbooks/mp/test_logical_routers_facts.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List Logical Routers vmware.ansible_for_nsxt.nsxt_logical_routers_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_logical_switches.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create logical switch vmware.ansible_for_nsxt.nsxt_logical_switches: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "ls1" replication_mode: SOURCE admin_state: UP transport_zone_name: "TZ1" state: "present" ================================================ FILE: tests/playbooks/mp/test_logical_switches_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List All Logical Switches vmware.ansible_for_nsxt.nsxt_logical_switches_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_manager_auto_deployment.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Deploy and register a cluster node VM vmware.ansible_for_nsxt.nsxt_manager_auto_deployment: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False deployment_requests: - roles: - CONTROLLER - MANAGER form_factor: "SMALL" user_settings: cli_password: "Admin!23Admin" root_password: "Admin!23Admin" audit_password: "Admin!23Admin" deployment_config: ignore_ssl_verification: False placement_type: VsphereClusterNodeVMDeploymentConfig vc_name: "VC1" vc_username: "administrator@vsphere.local" vc_password: "Admin!23" management_network: "VM Network" hostname: "manager5.vmware.com" compute: "HostCluster" storage: "datastore01" disk_provisioning: "THIN" default_gateway_addresses: - 10.176.135.253 management_port_subnets: - ip_addresses: - 10.176.132.57 prefix_length: "19" dns_servers: - 10.172.40.1 - FD01:1:3:1001::10 management_port_ipv6_subnets: - ip_addresses: - 2620:124:6020:1045::1b prefix_length: "64" default_ipv6_gateway_addresses: - 2620:124:6020:1045::253 #node_id: 7503e86e-c502-46fc-8d91-45a06d314d88 state: present ================================================ FILE: tests/playbooks/mp/test_manager_auto_deployment_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Lists info for all cluster node VM auto-deployment vmware.ansible_for_nsxt.nsxt_manager_auto_deployment_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_manager_status.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Check manager status vmware.ansible_for_nsxt.nsxt_manager_status: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False wait_time: 30 check_mode: yes ================================================ FILE: tests/playbooks/mp/test_ovf_deployment.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: deploy NSX Manager OVA vmware.ansible_for_nsxt.nsxt_deploy_ova: ovftool_path: "/usr/bin" #folder: 'folder-os-datacenter' datacenter: "os-datacenter" datastore: "datastore" portgroup: "VM Network" cluster: "os-compute-cluster-1" vmname: "nsxt-manager" hostname: "nsxt-manager-10" dns_server: "10.172.40.1 FD01:1:3:1001::10" dns_domain: "eng.vmware.com" ntp_server: "10.172.40.1 FD01:1:3:1001::10" gateway: "10.176.135.253" gateway6_0: "2620:124:6020:1045::253" ip_address: "10.176.132.59" ip_address6_0: "2620:124:6020:1045::1a" netmask: "255.255.252.0" netmask6_0: "64" admin_password: "Admin!23Admin" cli_password: "Admin!23Admin" path_to_ova: "http://build-squid.eng.vmware.com/build/mts/release/bora-19956989/publish/nsx-unified-appliance/exports/ovf" ova_file: "nsx-unified-appliance-4.0.1.0.0.19956985.ovf" vcenter: "10.176.132.1" vcenter_user: "administrator@vsphere.local" vcenter_passwd: "Admin!23" deployment_size: "small" role: "NSX Manager" ssh_enabled: true allow_ssh_root_login: true disk_mode: thin ip_protocol: IPv6 ================================================ FILE: tests/playbooks/mp/test_principal_identities.yml ================================================ # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Register a name-certificate combination vmware.ansible_for_nsxt.nsxt_principal_identities: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "Principal_display_name" description: 'Foo bar' certificate_name: 'Certificate_file' name: "Principal_name" certificate_pem_file: "/Path/to/cert/file" is_protected: True node_id: "node-1" role: "enterprise_admin" #certificate_name: "Certificate_file" state: "absent" ================================================ FILE: tests/playbooks/mp/test_principal_identities_facts.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List all existing principal identities vmware.ansible_for_nsxt.nsxt_principal_identities_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_repo_sync.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get repo sync status of an auto deployed node vmware.ansible_for_nsxt.nsxt_repo_sync: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False ================================================ FILE: tests/playbooks/mp/test_repo_sync_facts.yml ================================================ # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get repo sync status of an auto deployed node vmware.ansible_for_nsxt.nsxt_repo_sync_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False node_name: "manager-node02" ================================================ FILE: tests/playbooks/mp/test_rest.yml ================================================ # Test code for the nsxt_rest module. # Copyright: (c) 2020, sky-joker # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) --- - hosts: 127.0.0.1 gather_facts: no vars: nsxt_hostname: nsxt-manager-01 nsxt_username: admin nsxt_password: password tasks: - name: create a new segment vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: patch path: /policy/api/v1/infra/segments/segment content: { "display_name": "segment", "subnets": [ { "gateway_address": "192.168.0.1/24" } ], } register: create_new_segment_result - assert: that: - create_new_segment_result.changed is sameas true - create_new_segment_result.body is defined - create_new_segment_result.body | length >= 1 - create_new_segment_result.body.id == "segment" - create_new_segment_result.body.subnets.0.gateway_address == "192.168.0.1/24" - name: create a new segment(again - expectation no change will occur) vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: patch path: /policy/api/v1/infra/segments/segment content: { "display_name": "segment", "subnets": [ { "gateway_address": "192.168.0.1/24" } ], } register: create_new_segment_again_result - assert: that: - create_new_segment_again_result.changed is sameas false - create_new_segment_again_result.body is 
defined - create_new_segment_again_result.body | length >= 1 - create_new_segment_again_result.body.id == "segment" - create_new_segment_again_result.body.subnets.0.gateway_address == "192.168.0.1/24" - name: update segment parameter vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: patch path: /policy/api/v1/infra/segments/segment content: { "display_name": "segment", "subnets": [ { "gateway_address": "192.168.0.2/24" } ], } register: update_segment_parameter_result - assert: that: - update_segment_parameter_result.changed is sameas true - update_segment_parameter_result.body is defined - update_segment_parameter_result.body | length >= 1 - update_segment_parameter_result.body.id == "segment" - update_segment_parameter_result.body.subnets.0.gateway_address == "192.168.0.2/24" - name: update segment parameter(again - expectation no change will occur) vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: patch path: /policy/api/v1/infra/segments/segment content: { "display_name": "segment", "subnets": [ { "gateway_address": "192.168.0.2/24" } ], } register: update_segment_parameter_again_result - assert: that: - update_segment_parameter_again_result.changed is sameas false - update_segment_parameter_again_result.body is defined - update_segment_parameter_again_result.body | length >= 1 - update_segment_parameter_again_result.body.id == "segment" - update_segment_parameter_again_result.body.subnets.0.gateway_address == "192.168.0.2/24" - name: get segment information vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: get path: /policy/api/v1/infra/segments/segment register: get_segment_information_result - assert: that: - 
get_segment_information_result.changed is sameas false - get_segment_information_result.body is defined - get_segment_information_result.body | length >= 1 - get_segment_information_result.body.id == "segment" - get_segment_information_result.body.subnets.0.gateway_address == "192.168.0.2/24" - name: delete a segment vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: delete path: /policy/api/v1/infra/segments/segment register: delete_segment_result - assert: that: - delete_segment_result.changed is sameas true - name: delete a segment(again - expectation no change will occur) vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: delete path: /policy/api/v1/infra/segments/segment register: delete_segment_again_result - assert: that: - delete_segment_again_result.changed is sameas false - name: create json file for segment parameter copy: content: { "display_name": "segment", "subnets": [ { "gateway_address": "192.168.0.1/24" } ], } dest: segment_parametr.json register: create_json_file_result - assert: that: - create_json_file_result.changed is sameas true - name: create a new segment with json file vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: patch path: /policy/api/v1/infra/segments/segment src: segment_parametr.json register: create_new_segment_result - assert: that: - create_new_segment_result.changed is sameas true - create_new_segment_result.body is defined - create_new_segment_result.body | length >= 1 - create_new_segment_result.body.id == "segment" - create_new_segment_result.body.subnets.0.gateway_address == "192.168.0.1/24" - name: create a new segment with json file(again - expectation no change will occur) 
vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: patch path: /policy/api/v1/infra/segments/segment src: segment_parametr.json register: create_new_segment_again_result - assert: that: - create_new_segment_again_result.changed is sameas false - create_new_segment_again_result.body is defined - create_new_segment_again_result.body | length >= 1 - create_new_segment_again_result.body.id == "segment" - create_new_segment_again_result.body.subnets.0.gateway_address == "192.168.0.1/24" - name: update json file for segment parameter copy: content: { "display_name": "segment", "subnets": [ { "gateway_address": "192.168.0.2/24" } ], } dest: segment_parametr.json register: update_json_file_result - assert: that: - update_json_file_result.changed is sameas true - name: update segment parameter vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: patch path: /policy/api/v1/infra/segments/segment src: segment_parametr.json register: update_segment_parameter_result - assert: that: - update_segment_parameter_result.changed is sameas true - update_segment_parameter_result.body is defined - update_segment_parameter_result.body | length >= 1 - update_segment_parameter_result.body.id == "segment" - update_segment_parameter_result.body.subnets.0.gateway_address == "192.168.0.2/24" - name: update segment parameter(again - expectation no change will occur) vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: patch path: /policy/api/v1/infra/segments/segment src: segment_parametr.json register: update_segment_parameter_again_result - assert: that: - update_segment_parameter_again_result.changed is sameas false - update_segment_parameter_again_result.body is defined 
- update_segment_parameter_again_result.body | length >= 1 - update_segment_parameter_again_result.body.id == "segment" - update_segment_parameter_again_result.body.subnets.0.gateway_address == "192.168.0.2/24" - name: delete a segment vmware.ansible_for_nsxt.nsxt_rest: hostname: "{{ nsxt_hostname }}" username: "{{ nsxt_username }}" password: "{{ nsxt_password }}" validate_certs: false method: delete path: /policy/api/v1/infra/segments/segment register: delete_segment_result - assert: that: - delete_segment_result.changed is sameas true ================================================ FILE: tests/playbooks/mp/test_route_advertise.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Toggle tier 1 route advertisement vmware.ansible_for_nsxt.nsxt_route_advertise: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" display_name: "{{item.display_name}}" validate_certs: False enabled: "{{item.enabled}}" advertise_dns_forwarder: "{{item.advertise_dns_forwarder}}" advertise_lb_snat_ip: "{{item.advertise_lb_snat_ip}}" advertise_lb_vip: "{{item.advertise_lb_vip}}" advertise_nat_routes: "{{item.advertise_nat_routes}}" advertise_nsx_connected_routes: "{{item.advertise_nsx_connected_routes}}" advertise_static_routes: "{{item.advertise_static_routes}}" with_items: - "{{route_advertise}}" ================================================ FILE: tests/playbooks/mp/test_transport_node_collections.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Attach Transport node profile to cluster vmware.ansible_for_nsxt.nsxt_transport_node_collections: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "TNC1" resource_type: "TransportNodeCollection" description: "Transport Node Collections 1" compute_manager_name: "VC1" cluster_name: "os-compute-cluster-1" transport_node_profile_name: "TNP1" state: present ================================================ FILE: tests/playbooks/mp/test_transport_node_collections_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List Transport Node collections vmware.ansible_for_nsxt.nsxt_transport_node_collections_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_transport_node_profiles.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create transport node profile vmware.ansible_for_nsxt.nsxt_transport_node_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: TransportNodeProfile display_name: "{{item.display_name}}" description: NSX configured Test Transport Node Profile host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" state: present with_items: - "{{transport_node_profiles}}" ================================================ FILE: tests/playbooks/mp/test_transport_node_profiles_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List transport node profiles vmware.ansible_for_nsxt.nsxt_transport_node_profiles_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_transport_nodes.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create transport node vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" node_deployment_info: "{{item.node_deployment_info}}" # Host node deployment info # resource_type: "{{item.node_deployment_info.resource_type}}" # display_name: "{{item.node_deployment_info.display_name}}" # ip_addresses: "{{item.node_deployment_info.ip_addresses}}" # os_type: "{{item.node_deployment_info.os_type}}" # os_version: "{{item.node_deployment_info.os_version}}" # host_credential: # username: "{{item.node_deployment_info.host_credential.username}}" # password: "{{item.node_deployment_info.host_credential.password}}" # thumbprint: "{{item.node_deployment_info.host_credential.thumbprint}}" # Edge node deployment info # resource_type: "{{item.node_deployment_info.resource_type}}" # display_name: "{{item.node_deployment_info.display_name}}" # ip_addresses: "{{item.node_deployment_info.ip_addresses}}" # deployment_type: "{{item.node_deployment_info.deployment_type}}" # deployment_config: # form_factor: "{{item.node_deployment_info.deployment_config.form_factor}}" # node_user_settings: # cli_password: "{{item.node_deployment_info.deployment_config.node_user_settings.cli_password}}" # root_password: "{{item.node_deployment_info.deployment_config.node_user_settings.root_password}}" # vm_deployment_config: # placement_type: "{{item.node_deployment_info.deployment_config.vm_deployment_config.placement_type}}" # vc_id: "{{item.node_deployment_info.deployment_config.vm_deployment_config.vc_id}}" # data_network_ids: "{{item.node_deployment_info.deployment_config.vm_deployment_config.data_network_ids}}" # 
management_network_id: "{{item.node_deployment_info.deployment_config.vm_deployment_config.management_network_id}}" # hostname: "{{item.node_deployment_info.deployment_config.vm_deployment_config.hostname}}" # compute_id: "{{item.node_deployment_info.deployment_config.vm_deployment_config.compute_id}}" # storage_id: "{{item.node_deployment_info.deployment_config.vm_deployment_config.storage_id}}" # host_id: "{{item.node_deployment_info.deployment_config.vm_deployment_config.host_id}}" # default_gateway_addresses: "{{item.node_deployment_info.deployment_config.vm_deployment_config.default_gateway_addresses}}" # management_port_subnets: # - ip_addresses: "{{item.node_deployment_info.deployment_config.vm_deployment_config.management_port_subnets.ip_addresses}}" # prefix_length: "{{item.node_deployment_info.deployment_config.vm_deployment_config.management_port_subnets.prefix_length}}" state: present with_items: - "{{host_transport_nodes}}" ================================================ FILE: tests/playbooks/mp/test_transport_nodes_edge.yml ================================================ # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes tasks: - name: Create edge transport nodes vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{display_name}}" description: "Edge transport node ansible" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" transport_zone_endpoints: "{{item.transport_zone_endpoints}}" node_deployment_info: "{{item.node_deployment_info}}" state: present with_items: - "{{edge_transport_nodes}}" ================================================ FILE: tests/playbooks/mp/test_transport_nodes_edge_9x.yml ================================================ # Copyright 2020 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile_9x.yml tasks: - name: Create edge transport nodes vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" description: "Edge transport node ansible" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" node_deployment_info: "{{item.node_deployment_info}}" state: present with_items: - "{{edge_transport_nodes}}" ================================================ FILE: tests/playbooks/mp/test_transport_nodes_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List Transport Nodes vmware.ansible_for_nsxt.nsxt_transport_nodes_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_transport_nodes_host.yml ================================================ # Copyright 2020 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create transport nodes vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" description: "Transport node with host" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" transport_zone_endpoints: "{{item.transport_zone_endpoints}}" node_deployment_info: "{{item.node_deployment_info}}" state: present with_items: - "{{host_transport_nodes}}" ================================================ FILE: tests/playbooks/mp/test_transport_nodes_host_9x.yml ================================================ # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile_9x.yml tasks: - name: Create transport nodes vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "{{item.display_name}}" description: "Transport node with host" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{item.host_switches}}" node_deployment_info: "{{item.node_deployment_info}}" state: present with_items: - "{{host_transport_nodes}}" ================================================ FILE: tests/playbooks/mp/test_transport_zones.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create transport zone vmware.ansible_for_nsxt.nsxt_transport_zones: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: "PolicyTransportZone" display_name: "{{item.display_name}}" description: "NSX configured Test Transport Zone" tz_type: "{{item.tz_type}}" state: "present" is_default: false nested_nsx: False register: result with_items: - "{{transportzones}}" - debug: var=result.id ================================================ FILE: tests/playbooks/mp/test_transport_zones_facts.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List Transport Zones vmware.ansible_for_nsxt.nsxt_transport_zones_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_upgrade_eula_accept.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Accepts EULA vmware.ansible_for_nsxt.nsxt_upgrade_eula_accept: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False ================================================ FILE: tests/playbooks/mp/test_upgrade_eula_accept_facts.yml ================================================ # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Retrieve information about EULA acceptance vmware.ansible_for_nsxt.nsxt_upgrade_eula_accept_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False required_info: 'acceptance' ================================================ FILE: tests/playbooks/mp/test_upgrade_groups.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create upgrade groups vmware.ansible_for_nsxt.nsxt_upgrade_groups: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False display_name: "TestGroupAnsible" type: "HOST" parallel: "true" enabled: "true" upgrade_units: - host_name: "10.160.165.71" state: "present" ================================================ FILE: tests/playbooks/mp/test_upgrade_groups_facts.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get upgrade groups info vmware.ansible_for_nsxt.nsxt_upgrade_groups_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False ================================================ FILE: tests/playbooks/mp/test_upgrade_history_facts.yml ================================================ # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get upgrade history vmware.ansible_for_nsxt.nsxt_upgrade_history: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False ================================================ FILE: tests/playbooks/mp/test_upgrade_plan.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Update upgrade plan vmware.ansible_for_nsxt.nsxt_upgrade_plan: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False component_type: 'host' parallel: True pause_after_each_group: True pause_on_error: True state: 'present' ================================================ FILE: tests/playbooks/mp/test_upgrade_plan_facts.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Retrieve upgrade plan vmware.ansible_for_nsxt.nsxt_upgrade_plan_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False component_type: 'host' ================================================ FILE: tests/playbooks/mp/test_upgrade_postchecks.yml ================================================ # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Run upgrade postchecks vmware.ansible_for_nsxt.nsxt_upgrade_postchecks: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False timeout: 7200 component_type: 'host' ================================================ FILE: tests/playbooks/mp/test_upgrade_pre_post_checks_facts.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get upgrade pre and post checks info vmware.ansible_for_nsxt.nsxt_upgrade_pre_post_checks_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False ================================================ FILE: tests/playbooks/mp/test_upgrade_prechecks.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Run and abort upgrade prechecks vmware.ansible_for_nsxt.nsxt_upgrade_prechecks: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False state: 'present' ================================================ FILE: tests/playbooks/mp/test_upgrade_run.yml ================================================ # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Runs Upgrade vmware.ansible_for_nsxt.nsxt_upgrade_run: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False paused_upgrade: False ================================================ FILE: tests/playbooks/mp/test_upgrade_status_summary_facts.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get upgrade status summary vmware.ansible_for_nsxt.nsxt_upgrade_status_summary_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False ================================================ FILE: tests/playbooks/mp/test_upgrade_uc.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Upgrade UC vmware.ansible_for_nsxt.nsxt_upgrade_uc: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False ================================================ FILE: tests/playbooks/mp/test_upgrade_uc_facts.yml ================================================ # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get UC upgrade status vmware.ansible_for_nsxt.nsxt_upgrade_uc_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False ================================================ FILE: tests/playbooks/mp/test_upgrade_upload_mub.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Upload MUB to NSX-T Manager vmware.ansible_for_nsxt.nsxt_upgrade_upload_mub: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False url: "http://build-squid.eng.vmware.com/build/mts/release/bora-14179320/publish/upgrade/VMware-NSX-upgrade-bundle-2.5.0.0.0.14179320.mub" timeout: 9000 ================================================ FILE: tests/playbooks/mp/test_upgrade_upload_mub_facts.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get info of uploaded MUB to NSX-T Manager vmware.ansible_for_nsxt.nsxt_upgrade_upload_mub_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False bundle_id: "2500014364090" ================================================ FILE: tests/playbooks/mp/test_uplink_profiles.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Create uplink profile vmware.ansible_for_nsxt.nsxt_uplink_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: UplinkHostSwitchProfile display_name: "{{item.display_name}}" mtu: 1600 teaming: "{{item.teaming}}" transport_vlan: "{{item.transport_vlan}}" state: "present" with_items: - "{{uplink_profiles}}" ================================================ FILE: tests/playbooks/mp/test_uplink_profiles_9x.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile_9x.yml tasks: - name: Create uplink profile vmware.ansible_for_nsxt.nsxt_uplink_profiles: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False resource_type: UplinkHostSwitchProfile display_name: "{{item.display_name}}" teaming: "{{item.teaming}}" transport_vlan: "{{item.transport_vlan}}" state: "present" with_items: - "{{uplink_profiles}}" ================================================ FILE: tests/playbooks/mp/test_uplink_profiles_facts.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: List uplink profiles vmware.ansible_for_nsxt.nsxt_uplink_profiles_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/mp/test_vidm.yml ================================================ --- # # Playbook to register Compute Managers with NSX Appliance # - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Register vidm to NSX vmware.ansible_for_nsxt.nsxt_vidm: hostname: "{{ hostname }}" username: "{{ username }}" password: "{{ password }}" validate_certs: "{{ validate_certs }}" client_id: "{{ vidm.client_id }}" client_secret: "{{ vidm.client_secret }}" host_name: "{{ vidm.host_name }}" lb_enable: "{{ vidm.lb_enable }}" node_host_name: "{{ vidm.node_host_name }}" thumbprint: "{{ vidm.thumbprint }}" state: absent ================================================ FILE: tests/playbooks/mp/test_virtual_ip.yml ================================================ # Copyright 2019 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Adds cluster virtual IP address vmware.ansible_for_nsxt.nsxt_virtual_ip: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False virtual_ip_address: "10.186.94.5" virtual_ip6_address: "2620:124:6020:c308::10" action: clear_virtual_ip state: absent ================================================ FILE: tests/playbooks/mp/test_virtual_ip_facts.yml ================================================ # Copyright 2019 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - answerfile.yml tasks: - name: Get all configured cluster virtual IP address vmware.ansible_for_nsxt.nsxt_virtual_ip_facts: hostname: "{{hostname}}" username: "{{username}}" password: "{{password}}" validate_certs: False check_mode: yes ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_bfd_profile.yaml ================================================ - hosts: localhost tasks: - name: Update BFD Profile vmware.ansible_for_nsxt.nsxt_policy_bfd_profile: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle display_name: test-bfd-profile state: present interval: 200 multiple: 10 ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_gateway_policy.yml ================================================ # Copyright 2020 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to test Gateway Policy # - hosts: localhost tasks: - name: create Gateway Policy vmware.ansible_for_nsxt.nsxt_policy_gateway_policy: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle id: test-gateway-policy display_name: test-gateway-policy state: "present" domain_id: "default" locked: True rules: - action: "ALLOW" description: "example-rule" sequence_number: 1 display_name: "test-example-rule" id: "test-example-rule" source_groups: ["/infra/domains/vmc/groups/dbgroup"] destination_groups: ["/infra/domains/vmc/groups/appgroup"] services: ["/infra/services/HTTP", "/infra/services/CIM-HTTP"] tag: my-tag tags: - scope: scope-1 tag: tag-1 logged: True notes: dummy-notes ip_protocol: IPV4_IPV6 scope: - /infra/tier-0s/PLR1 profiles: "encryption algorithm" ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_group.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to test Policy Group # - hosts: localhost tasks: - name: create Policy Group vmware.ansible_for_nsxt.nsxt_policy_group: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle id: test-policy-group display_name: test-policy-group state: "present" domain_id: "default" expression: - member_type: "VirtualMachine" value: "webvm" key: "Tag" operator: "EQUALS" resource_type: "Condition" ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_ip_block.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to test IP Block # - hosts: localhost tasks: - name: create IP Block vmware.ansible_for_nsxt.nsxt_policy_ip_block: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle display_name: test-ip-blk state: "absent" cidr: "192.168.0.0/16" ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_ip_pool.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to test IP Pool # - hosts: localhost tasks: - name: create IP Pool vmware.ansible_for_nsxt.nsxt_policy_ip_pool: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle display_name: test-ip-pool-1 state: "present" tags: - tag: "a" scope: "b" pool_block_subnets: - id: test-ip-subnet-1 state: present ip_block_id: "test-ip-blk-1" size: 16 - display_name: test-ip-subnet-2 state: present ip_block_id: "test-ip-blk-1" size: 16 - display_name: test-ip-subnet-3 state: present ip_block_id: "test-ip-blk-1" size: 8 pool_static_subnets: - id: test-ip-static-subnet-1 state: present allocation_ranges: - start: '192.116.0.10' end: '192.116.0.20' - start: '192.116.0.30' end: '192.116.0.40' cidr: '192.116.0.0/26' - display_name: test-ip-static-subnet-2 state: present allocation_ranges: - start: '192.116.1.10' end: '192.116.1.20' - start: '192.116.1.30' end: '192.116.1.40' cidr: '192.116.1.0/26' ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_l2_bridge_ep_profile.yml ================================================ # Copyright 2020 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to test L2 Bridge Endpoint Profile # - hosts: localhost tasks: - name: update L2 Bridge Endpoint Profile vmware.ansible_for_nsxt.nsxt_policy_l2_bridge_ep_profile: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle display_name: test-br-profile state: present edge_nodes_info: - edge_cluster_display_name: edge-cluster edge_node_id: 60f7dc14-d11c-11ea-8fb5-000c29e1fb0e failover_mode: NON_PREEMPTIVE ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_security_policy.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to test Security Policy # - hosts: localhost tasks: - name: create Security Policy vmware.ansible_for_nsxt.nsxt_policy_security_policy: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle id: test-sec-pol display_name: test-sec-pol state: "present" domain_id: "default" locked: True rules: - action: "ALLOW" description: "example-rule" sequence_number: 1 display_name: "test-example-rule" id: "test-example-rule" source_groups: ["/infra/domains/vmc/groups/dbgroup"] destination_groups: ["/infra/domains/vmc/groups/appgroup"] services: ["/infra/services/HTTP", "/infra/services/CIM-HTTP"] tag: my-tag tags: - scope: scope-1 tag: tag-1 logged: True notes: dummy-notes ip_protocol: IPV4_IPV6 scope: - /infra/tier-0s/PLR1 profiles: "encryption algorithm" ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_segment.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to test Segment # - hosts: localhost tasks: - name: Update Segment vmware.ansible_for_nsxt.nsxt_policy_segment: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle state: present domain_name: dn1 transport_zone_display_name: "1-transportzone-730" replication_mode: "SOURCE" address_bindings: - ip_address: "10.1.2.11" advanced_config: address_pool_display_name: small-2-pool connectivity: "OFF" hybrid: False local_egress: True admin_state: UP connectivity_path: "/infra/tier-1s/d082bc25-a9b2-4d13-afe5-d3cecad4b854" subnets: - gateway_address: "40.1.1.1/16" # - dhcp_config: # # IPv4 example # options: # option121: # static_routes: # - network: "10.22.12.2/23" # next_hop: "10.10.10.10" # resource_type: SegmentDhcpV4Config # lease_time: 16400 # gateway_address: "192.40.10.1/24" # # IPv6 Example # resource_type: SegmentDhcpV6Config # preferred_time: 2048 # excluded_ranges: # - fc7e::1-fc7e::32 # server_address: "fc7e:f206:db42::2/48" # gateway_address: "fc7e:f206:db42::1/48" segment_ports: - display_name: test-sp-1 state: present tags: - scope: "scope-1" tag: "tag-2" extra_configs: - config_pair: key: key value: value ignored_address_bindings: - ip_address: "10.1.2.122" - display_name: test-sp-2 state: present - display_name: test-sp-3 state: present ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_tier0.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to test Tier0 # - hosts: localhost tasks: - name: Update Tier0 vmware.ansible_for_nsxt.nsxt_policy_tier0: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle display_name: test-tier0-1 state: present ha_mode: "ACTIVE_STANDBY" failover_mode: "PREEMPTIVE" disable_firewall: True force_whitelisting: True rd_admin_field: "122.34.12.124" tags: - scope: "a" tag: "b" static_routes: - state: present display_name: test-sr network: '12.12.12.0/24' next_hops: - ip_address: "192.165.1.4" bfd_peers: - state: present display_name: test-peer-1 peer_address: "192.100.100.5" bfd_profile_id: test-bfd-profile locale_services: - state: present id: "test-t0ls" route_redistribution_config: redistribution_rules: - name: abc route_redistribution_types: ["TIER0_STATIC", "TIER0_NAT"] edge_cluster_info: edge_cluster_id: "7ef91a10-c780-4f48-a279-a5662db4ffa3" preferred_edge_nodes_info: - edge_cluster_id: "7ef91a10-c780-4f48-a279-a5662db4ffa3" edge_node_id: "e10c42dc-db27-11e9-8cd0-000c291af7ee" ha_vip_configs: - external_interface_info: - display_name: '3-policyconnectivity-64' - id: '4-policyconnectivity-562' vip_subnets: - ip_addresses: - '12.12.12.12' prefix_len: 23 BGP: state: present local_as_num: '1211' inter_sr_ibgp: False graceful_restart_config: mode: "GR_AND_HELPER" timer: restart_timer: 12 route_aggregations: - prefix: "10.1.1.0/24" - prefix: "11.1.0.0/24" summary_only: False neighbors: - display_name: neigh1 neighbor_address: "1.2.3.4" remote_as_num: "12" state: present interfaces: - id: "test-t0-t0ls-iface" display_name: "test-t0-t0ls-iface" state: present subnets: - ip_addresses: ["35.1.1.1"] prefix_len: 24 segment_id: "test-seg-4" edge_node_info: edge_cluster_id: "7ef91a10-c780-4f48-a279-a5662db4ffa3" edge_node_id: "e10c42dc-db27-11e9-8cd0-000c291af7ee" mtu: 1500 urpf_mode: "NONE" multicast: 
enabled: True ipv6_ndra_profile_display_name: test vrf_config: display_name: my-vrf id: my-vrf2 tier0_display_name: node-t0 tags: - scope: scope-tag-1 tag: value-tag-1 route_distinguisher: 'ASN:4000' evpn_transit_vni: 6000 ================================================ FILE: tests/playbooks/policy/test_nsxt_policy_tier1.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to test Tier1 # - hosts: localhost tasks: - name: Update Tier1 vmware.ansible_for_nsxt.nsxt_policy_tier1: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle display_name: test-tier22222 state: present failover_mode: "PREEMPTIVE" disable_firewall: True force_whitelisting: True enable_standby_relocation: False tags: - scope: "a" tag: "b" route_advertisement_rules: - name: "test-route-advertisement-rules" route_advertisement_types: ['TIER1_STATIC_ROUTES', 'TIER1_CONNECTED'] subnets: ["35.1.1.1/23"] route_advertisement_types: ['TIER1_STATIC_ROUTES', 'TIER1_CONNECTED', 'TIER1_NAT'] tier0_display_name: "node-t0" static_routes: - state: present display_name: test-sr network: '12.12.12.0/24' next_hops: - ip_address: "192.165.1.4" locale_services: - state: present display_name: test-t1ls-2 route_redistribution_config: redistribution_rules: - name: abc route_redistribution_types: ["TIER0_STATIC", "TIER0_NAT"] ha_vip_configs: - external_interface_info: # Either of the two ways below can be used - external_interface_path: /infra/tier-0s/pepsi/locale-services/1-policyconnectivity-706/interfaces/2-policyconnectivity-1411 - tier0_display_name: pepsi tier0_ls_display_name: 1-policyconnectivity-706 tier0_ls_interface_display_name: 1-policyconnectivity-1649 vip_subnets: - ip_addresses: - '12.12.12.12' prefix_len: 23 interfaces: - id: "test-t1-t1ls-iface-2" display_name: "test-t1-t1ls-iface" state: 
present subnets: - ip_addresses: ["35.1.1.1"] prefix_len: 24 segment_id: "test-seg-2" ipv6_ndra_profile_id: test mtu: 1400 urpf_mode: NONE ================================================ FILE: tests/playbooks/policy/test_vm_tags.yaml ================================================ - hosts: localhost tasks: - name: Update Tags on VMs vmware.ansible_for_nsxt.nsxt_vm_tags: hostname: "default" nsx_cert_path: /root/com.vmware.nsx.ncp/nsx.crt nsx_key_path: /root/com.vmware.nsx.ncp/nsx.key validate_certs: True ca_path: /path/to/my/ca-bundle virtual_machine_display_name: App-VM-1 remove_other_tags: False add_tags: - scope: "my-scope" tag: "my-tag" # - scope: "my-scope1" # tag: "my-tag" remove_tags_with_scope: - my-scope1 ================================================ FILE: tests/playbooks/topologies/deploy_nsx_cluster/01_deploy_first_node.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to deploy the first NSX Appliance node. 
Also checks the node # status # - hosts: 127.0.0.1 connection: local become: yes vars_files: - deploy_nsx_cluster_vars.yml tasks: - name: deploy NSX Manager OVA vmware.ansible_for_nsxt.nsxt_deploy_ova: ovftool_path: "/usr/bin" datacenter: "{{ nsx_node1['datacenter'] }}" datastore: "{{ nsx_node1['datastore'] }}" portgroup: "{{ nsx_node1['portgroup'] }}" cluster: "{{ nsx_node1['cluster'] }}" vmname: "{{ nsx_node1['hostname'] }}" hostname: "{{ nsx_node1['hostname'] }}" dns_server: "{{ dns_server }}" dns_domain: "{{ domain }}" ntp_server: "{{ ntp_server }}" gateway: "{{ gateway }}" ip_address: "{{ nsx_node1['mgmt_ip'] }}" netmask: "{{ netmask }}" admin_password: "{{ nsx_password }}" cli_password: "{{ nsx_password }}" path_to_ova: "{{ nsx_ova_path }}" ova_file: "{{ nsx_ova }}" vcenter: "{{ compute_managers[0]['mgmt_ip'] }}" vcenter_user: "{{ compute_managers[0]['username'] }}" vcenter_passwd: "{{ compute_managers[0]['password'] }}" deployment_size: "small" # Note: The role below is for NSX 2.5 and above. For prior # release, the role should be "nsx-manager nsx-controller" role: "NSX Manager" - name: Check manager status vmware.ansible_for_nsxt.nsxt_manager_status: hostname: "{{ nsx_node1['mgmt_ip'] }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" wait_time: 50 ================================================ FILE: tests/playbooks/topologies/deploy_nsx_cluster/02_configure_compute_manager.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to register Compute Managers with NSX Appliance # - hosts: 127.0.0.1 connection: local become: yes vars_files: - deploy_nsx_cluster_vars.yml tasks: - name: Register compute manager vmware.ansible_for_nsxt.nsxt_fabric_compute_managers: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" display_name: "{{ item.display_name }}" server: "{{ item.mgmt_ip }}" origin_type: "{{ item.origin_type }}" credential: credential_type: "{{ item.credential_type }}" username: "{{ item.username }}" password: "{{ item.password }}" state: present with_items: - "{{compute_managers}}" ================================================ FILE: tests/playbooks/topologies/deploy_nsx_cluster/03_deploy_second_third_node.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Deploys remaining NSX appliance nodes and forms a cluster. Requires the first # NSX appliance node to be deployed and at least one Compute Manager registered. 
# - hosts: 127.0.0.1 connection: local become: yes vars_files: - deploy_nsx_cluster_vars.yml tasks: - name: Deploying additional nodes vmware.ansible_for_nsxt.nsxt_manager_auto_deployment: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" deployment_requests: - roles: - CONTROLLER - MANAGER form_factor: "SMALL" user_settings: cli_password: "{{ nsx_password }}" root_password: "{{ nsx_password }}" deployment_config: placement_type: VsphereClusterNodeVMDeploymentConfig vc_name: "{{ compute_managers[0]['display_name'] }}" vc_username: "{{ compute_managers[0]['username'] }}" vc_password: "{{ compute_managers[0]['password'] }}" management_network: "{{ item.portgroup }}" hostname: "{{ item.hostname }}" compute: "{{ item.cluster }}" storage: "{{ item.datastore }}" default_gateway_addresses: - "{{ gateway }}" dns_servers: - "{{ dns_server }}" ntp_servers: - "{{ ntp_server }}" management_port_subnets: - ip_addresses: - "{{ item.mgmt_ip }}" prefix_length: "{{ item.prefix }}" state: present with_items: - "{{ additional_nodes }}" ================================================ FILE: tests/playbooks/topologies/deploy_nsx_cluster/04_add_nsx_license.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- # # Playbook to add NSX licenses to the NSX Appliance # - hosts: 127.0.0.1 connection: local become: yes vars_files: - deploy_nsx_cluster_vars.yml tasks: - name: Add NSX License vmware.ansible_for_nsxt.nsxt_licenses: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" license_key: "{{ item.license_key }}" state: "{{ state }}" with_items: - "{{nsxt_licenses}}" ================================================ FILE: tests/playbooks/topologies/deploy_nsx_cluster/README.md ================================================ # Deploy NSX-T Cluster # Overview The set of playbooks in this example deploy a full NSX Cluster. The playbooks are divided based on the workflow. There are 4 main playbooks and a common variables file: * 01_deploy_first_node.yml * 02_configure_compute_manager.yml * 03_deploy_second_third_node.yml * 04_add_nsx_license.yml * deploy_nsx_cluster_vars.yml To run the example, copy all the files two-levels up, edit the variables file to match your needs and run the playbooks in the order listed. Validated against: * NSX-T 2.4 GA * NSX-T 2.5 GA It currently does not configure a cluster Virtual IP ================================================ FILE: tests/playbooks/topologies/deploy_nsx_cluster/deploy_nsx_cluster_vars.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # Variables file for deploying NSX-T Cluster # { # # Common NSX Appliance variables # "nsx_username": "admin", "nsx_password": "myPassword1!myPassword1!", "validate_certs": False, "state": "present", # # OVA/OVF Information. Path can be on local file system or a HTTP URL # "nsx_ova_path": "/media/disk2", # "nsx_ova": "nsx-unified-appliance-3.0.0.0.0.15946739.ova", "nsx_ova": "nsx-unified-appliance-3.0.1.0.0.16404476.ova", # # Common network details. 
This assumes all NSX appliance nodes are on the # same subnet. If there is a need to deploy NSX appliance nodes which are # on different subnets, add node specific details in the blocks below and # use them in the playbooks instead. # "domain": "mylab.local", "netmask": "255.255.255.224", "gateway": "10.114.200.33", "dns_server": "10.116.1.201", "ntp_server": "10.114.200.8", # # First NSX appliance node. Defined separate based on the consumption. # Accepts both IP (IPv4) and FQDN for 'mgmt_ip' # "nsx_node1": { "hostname": "mynsx-01.mylab.local", "mgmt_ip": "10.114.200.41", "datacenter": "Datacenter", "cluster": "Management", "datastore": "datastore36", "portgroup": "VM Network" }, # # Additional nodes defined as an array so that its easier to iterate # through them in the playbook. # "additional_nodes": [ { "hostname": "mynsx-02.mylab.local", "mgmt_ip": "10.114.200.42", "prefix": "27", "datacenter": "Datacenter", "cluster": "Management", "datastore": "datastore36", "portgroup": "VM Network" }, { "hostname": "mynsx-03.mylab.local", "mgmt_ip": "10.114.200.43", "prefix": "27", "datacenter": "Datacenter", "cluster": "Management", "datastore": "datastore36", "portgroup": "VM Network" } ], # # One or more compute managers that have to be registered with NSX # "compute_managers": [ { "display_name": "vcenter", "mgmt_ip": "10.114.200.6", "origin_type": "vCenter", "credential_type": "UsernamePasswordLoginCredential", "username": "administrator@madhu.local", "password": "VMware1!" } ], # # NSX-T licenses # "nsxt_licenses": [ { "license_key": "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX" } ], } ================================================ FILE: tests/playbooks/topologies/misc/create_and_attach_t0_t1_routers.yml ================================================ # Copyright 2020 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # This example shows how to create T0 and T1 Router and connect them # --- - hosts: 127.0.0.1 connection: local become: yes vars: nsx: "192.168.200.11" user: "admin" password: "myPassword1!myPassword1!" state: "present" tasks: - name: Create T0 vmware.ansible_for_nsxt.nsxt_logical_routers: hostname: "{{ nsx }}" username: "{{ user }}" password: "{{ password }}" validate_certs: False state: "{{ state }}" display_name: "myTier-0" edge_cluster_name: "Edge-Cluster-01" router_type: "TIER0" high_availability_mode: "ACTIVE_ACTIVE" - name: Create T1 vmware.ansible_for_nsxt.nsxt_logical_routers: hostname: "{{ nsx }}" username: "{{ user }}" password: "{{ password }}" validate_certs: False state: "{{ state }}" display_name: "myTier-1" edge_cluster_name: "Edge-Cluster-01" router_type: "TIER1" - name: Create Tier0 router port to Tier1 vmware.ansible_for_nsxt.nsxt_logical_router_ports: hostname: "{{ nsx }}" username: "{{ user }}" password: "{{ password }}" validate_certs: False state: "{{ state }}" display_name: "RouterPortOnTier0toT1" resource_type: "LogicalRouterLinkPortOnTIER0" logical_router_name: "myTier-0" - name: Get Router ID uri: url: "https://192.168.200.11/api/v1/logical-router-ports?resource_type=LogicalRouterLinkPortOnTIER0" user: "{{ user }}" password: "{{ password }}" validate_certs: False return_content: yes use_proxy: false force_basic_auth: true register: ports - name: Create Tier1 router port to Tier0 vmware.ansible_for_nsxt.nsxt_logical_router_ports: hostname: "{{ nsx }}" username: "{{ user }}" password: "{{ password }}" validate_certs: False state: "{{ state }}" display_name: "RouterPortOnTier1toT0" resource_type: "LogicalRouterLinkPortOnTIER1" logical_router_name: "myTier-1" linked_logical_router_port_id: target_id: "{{ ports.json['results'][0].id }}" target_type: LogicalPort ================================================ FILE: tests/playbooks/topologies/policy_modules/01_create_t0_gateway.yml 
================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: localhost become: yes vars_files: - build_topology_vars.yml tasks: - name: Modify Tier0 vmware.ansible_for_nsxt.nsxt_policy_tier0: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" state: "{{ state }}" display_name: "{{ item.display_name }}" ha_mode: "{{ item.ha_mode }}" tags: "{{ item.tags }}" locale_services: "{{ item.locale_services }}" with_items: - "{{ tier0_gateways }}" ================================================ FILE: tests/playbooks/topologies/policy_modules/02_create_t1_gateway.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: localhost become: yes vars_files: - build_topology_vars.yml tasks: - name: Modify Tier1 vmware.ansible_for_nsxt.nsxt_policy_tier1: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" state: "{{ state }}" display_name: "{{ item.display_name }}" tier0_display_name: "{{ item.tier0_display_name }}" tags: "{{ item.tags }}" with_items: - "{{ tier1_gateways }}" ================================================ FILE: tests/playbooks/topologies/policy_modules/03_create_segments.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: localhost become: yes vars_files: - build_topology_vars.yml tasks: - name: Modify Segment vmware.ansible_for_nsxt.nsxt_policy_segment: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" state: "{{ state }}" validate_certs: "{{ validate_certs }}" display_name: "{{ item.display_name }}" tier1_id: "{{ item.tier1_display_name }}" domain_name: "{{ item.domain_name }}" transport_zone_display_name: "{{ item.tz }}" subnets: "{{ item.subnets }}" tags: "{{ item.tags }}" with_items: - "{{ segments }}" ================================================ FILE: tests/playbooks/topologies/policy_modules/04_create_groups.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: localhost become: yes vars_files: - build_topology_vars.yml tasks: - name: Modify Groups vmware.ansible_for_nsxt.nsxt_policy_group: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" state: "{{ state }}" validate_certs: "{{ validate_certs }}" domain_id: "{{ item.domain_id }}" display_name: "{{ item.display_name }}" expression: "{{ item.expression }}" tags: "{{ item.tags }}" with_items: - "{{ mygroups }}" ================================================ FILE: tests/playbooks/topologies/policy_modules/05_create_security_policy.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: localhost become: yes vars_files: - build_topology_vars.yml tasks: - name: Modify Security Policy vmware.ansible_for_nsxt.nsxt_policy_security_policy: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" state: "{{ state }}" display_name: "{{ item.display_name }}" domain_id: "{{ item.domain_id }}" category: "{{ item.category }}" rules: "{{ item.rules }}" tags: "{{ item.tags }}" with_items: - "{{ security_policies }}" ================================================ FILE: tests/playbooks/topologies/policy_modules/README.md ================================================ Example to build the following Topology: * 1 Tier0 Gateway * 1 Tier1 Gateway * 3 Subnets * 3 Groups * 2 Security Policies with 2 rules each ================================================ FILE: tests/playbooks/topologies/policy_modules/build_topology.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - import_playbook: 01_create_t0_gateway.yml - import_playbook: 02_create_t1_gateway.yml - import_playbook: 03_create_segments.yml - import_playbook: 04_create_groups.yml - import_playbook: 05_create_security_policy.yml ================================================ FILE: tests/playbooks/topologies/policy_modules/build_topology_vars.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # Variables file to deploy a simple topology { "nsx_manager": "nsx_manager_IP", "nsx_username": "nsx_username", "nsx_password": "nsx_password", "validate_certs": "false", "state": "present", "tier0_gateways": [ { "display_name": "Tier-0", "ha_mode": "ACTIVE_STANDBY", "tags": [ { "tag": "ansible", "scope": "demo" } ], "locale_services": [ { "state": "present", "id": "test-t0ls", "route_redistribution_config": { "redistribution_rules": [ "route_redistribution_types": ["TIER0_STATIC", "TIER0_NAT"] ] }, "edge_cluster_info": { "edge_cluster_display_name": "Edge-Cluster-01", }, "preferred_edge_nodes_info": [ { "edge_cluster_display_name": "Edge-Cluster-01", "edge_node_display_name": "EdgeNode-01" } ], "BGP": { "state": "present", "local_as_num": '1211' } } ] } ], "tier1_gateways": [ { "display_name": "Tier-1", "tier0_display_name": "Tier-0", "tags": [ { "tag": "ansible", "scope": "demo" } ] } ], "segments": [ { "display_name": "Web-Segment", "tier1_display_name": "Tier-1", "tz": "Overlay-TZ", "domain_name": "mylab.net", "subnets": [ { "gateway_address": "192.168.10.1/24" } ], "tags": [ { "tag": "ansible", "scope": "demo" }, { "tag": "web", "scope": "east" } ] }, { "display_name": "App-Segment", "tier1_display_name": "Tier-1", "tz": "Overlay-TZ", "domain_name": "mylab.net", "subnets": [ { "gateway_address": "192.168.20.1/24" } ], "tags": [ { "tag": "ansible", "scope": "demo" }, { "tag": "app", "scope": "east" } ] }, { "display_name": "DB-Segment", "tier1_display_name": "Tier-1", "tz": "Overlay-TZ", "domain_name": "mylab.net", "subnets": [ { "gateway_address": "192.168.30.1/24" } ], "tags": [ { "tag": "ansible", "scope": "demo" }, { "tag": "db", "scope": "east" } ] } ], # Note: 'group' is a reserved key. Cant use it here. 
"mygroups": [ { "display_name": "web-VMs", "domain_id": "default", "expression": [ { "member_type": "VirtualMachine", "value": "web", "key": "Tag", "operator": "EQUALS", "resource_type": "Condition" } ], "tags": [ { "tag": "ansible", "scope": "demo" }, { "tag": "web", "scope": "east" } ] }, { "display_name": "app-VMs", "domain_id": "default", "expression": [ { "member_type": "VirtualMachine", "value": "app", "key": "Tag", "operator": "EQUALS", "resource_type": "Condition" } ], "tags": [ { "tag": "ansible", "scope": "demo" }, { "tag": "app", "scope": "east" } ] }, { "display_name": "db-VMs", "domain_id": "default", "expression": [ { "member_type": "VirtualMachine", "value": "web", "key": "Tag", "operator": "EQUALS", "resource_type": "Condition" } ], "tags": [ { "tag": "ansible", "scope": "demo" }, { "tag": "db", "scope": "east" } ] } ], "security_policies" : [ { "display_name": "web-to-app", "domain_id": "default", "category": "Application", "rules": [ { "display_name": "app-port", "description": "Rule for Application port", "sequence_number": 1, "source_groups": [ "/infra/domains/default/groups/web-VMs" ], "destination_groups": [ "/infra/domains/default/groups/app-VMs" ], "services": [ "/infra/services/AD_Server" ], "action": "ALLOW" }, { "display_name": "Catch-All", "description": "Catch All rule", "sequence_number": 2, "source_groups": [ "any" ], "destination_groups": [ "any" ], "services": [ "any" ], "action": "DROP" }, ], "tags": [ { "tag": "ansible", "scope": "demo" } ] }, { "display_name": "db", "domain_id": "default", "category": "Infrastructure", "rules": [ { "display_name": "MySQL", "description": "Rule for Application port", "sequence_number": 1, "source_groups": [ "any" ], "destination_groups": [ "any" ], "services": [ "/infra/services/MySQL" ], "action": "ALLOW" }, { "display_name": "Catch-All", "description": "Catch All rule", "sequence_number": 2, "source_groups": [ "any" ], "destination_groups": [ "any" ], "services": [ "any" ], "action": "DROP" }, 
], "tags": [ { "tag": "ansible", "scope": "demo" } ] } ] } ================================================ FILE: tests/playbooks/topologies/policy_modules/cleanup_topology.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - import_playbook: 05_create_security_policy.yml - import_playbook: 04_create_groups.yml - import_playbook: 03_create_segments.yml - import_playbook: 02_create_t1_gateway.yml - import_playbook: 01_create_t0_gateway.yml ================================================ FILE: tests/playbooks/topologies/setup_infra/01_setup_transport_zones.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - setup_infra_vars.yml tasks: - name: Create transport zone vmware.ansible_for_nsxt.nsxt_transport_zones: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" resource_type: "TransportZone" display_name: "{{ item.display_name }}" description: "{{ item.description }}" transport_type: "{{ item.transport_type }}" host_switch_name: "{{ item.host_switch_name }}" state: "{{ state }}" with_items: - "{{ transport_zones }}" ================================================ FILE: tests/playbooks/topologies/setup_infra/01_setup_transport_zones_9x.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - setup_infra_vars_9x.yml tasks: - name: Create transport zone vmware.ansible_for_nsxt.nsxt_transport_zones: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" resource_type: "TransportZone" display_name: "{{ item.display_name }}" description: "{{ item.description }}" tz_type: "{{item.tz_type}}" state: "{{ state }}" with_items: - "{{ transport_zones }}" ================================================ FILE: tests/playbooks/topologies/setup_infra/02_setup_TEP_IP_Pools.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - setup_infra_vars.yml tasks: - name: Create IP Pools vmware.ansible_for_nsxt.nsxt_policy_ip_pool: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" display_name: "{{ item.display_name }}" pool_static_subnets: "{{ item.pool_static_subnets }}" state: "{{ state }}" with_items: - "{{ ip_pools }}" ================================================ FILE: tests/playbooks/topologies/setup_infra/03_setup_transport_node_profiles.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - setup_infra_vars.yml tasks: - name: Create Transport Node Profiles vmware.ansible_for_nsxt.nsxt_transport_node_profiles: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" resource_type: TransportNodeProfile display_name: "{{ item.display_name }}" description: "{{ item.description }}" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{ item.host_switches }}" transport_zone_endpoints: "{{ item.transport_zone_endpoints }}" state: "{{ state }}" with_items: - "{{ transport_node_profiles }}" ================================================ FILE: tests/playbooks/topologies/setup_infra/03_setup_transport_node_profiles_9x.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - setup_infra_vars_9x.yml tasks: - name: Create Transport Node Profiles vmware.ansible_for_nsxt.nsxt_transport_node_profiles: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" resource_type: TransportNodeProfile display_name: "{{ item.display_name }}" description: "{{ item.description }}" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{ item.host_switches }}" state: "{{ state }}" with_items: - "{{ transport_node_profiles }}" ================================================ FILE: tests/playbooks/topologies/setup_infra/04_setup_transport_nodes.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - setup_infra_vars.yml tasks: - name: Create Transport Nodes vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" display_name: "{{ item.display_name }}" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{ item.host_switches }}" transport_zone_endpoints: "{{ item.transport_zone_endpoints }}" node_deployment_info: "{{ item.node_deployment_info }}" state: "{{ state }}" with_items: - "{{ transport_nodes }}" ================================================ FILE: tests/playbooks/topologies/setup_infra/04_setup_transport_nodes_9x.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - setup_infra_vars_9x.yml tasks: - name: Create Transport Nodes vmware.ansible_for_nsxt.nsxt_transport_nodes: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" display_name: "{{ item.display_name }}" host_switch_spec: resource_type: StandardHostSwitchSpec host_switches: "{{ item.host_switches }}" node_deployment_info: "{{ item.node_deployment_info }}" state: "{{ state }}" with_items: - "{{ transport_nodes }}" ================================================ FILE: tests/playbooks/topologies/setup_infra/05_setup_edge_cluster.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - setup_infra_vars.yml tasks: - name: Add edge cluster vmware.ansible_for_nsxt.nsxt_edge_clusters: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" display_name: "{{ item.display_name }}" cluster_profile_bindings: "{{ item.cluster_profile_bindings }}" members: "{{ item.members }}" state: "{{ state }}" with_items: - "{{ edge_clusters }}" ================================================ FILE: tests/playbooks/topologies/setup_infra/06_setup_transport_node_collections.yml ================================================ # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only --- - hosts: 127.0.0.1 connection: local become: yes vars_files: - setup_infra_vars.yml tasks: - name: Create Transport Node Collections vmware.ansible_for_nsxt.nsxt_transport_node_collections: hostname: "{{ nsx_node1.mgmt_ip }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" display_name: "{{ item.display_name }}" description: "{{ item.description }}" resource_type: "TransportNodeCollection" compute_manager_name: "{{ item.compute_manager_name }}" cluster_name: "{{ item.cluster_name }}" transport_node_profile_name: "{{ item.transport_node_profile_name }}" state: "{{ state }}" with_items: - "{{ transport_node_collections }}" ================================================ FILE: tests/playbooks/topologies/setup_infra/README.md ================================================ # Setup Day-1 Infra # Overview The set of playbooks in this example deploy all the Day-1 Infra objects needed to start using NSX-T. The playbooks are divided based on the workflow. 
There are 5 main playbooks and a common variable file: * 01_setup_transport_zones.yml * 02_setup_TEP_IP_Pools.yml: In this example, a single IP Pool is used to provide TEP IP for both Edge and Host Transport nodes. * 03_setup_transport_node_profiles.yml * 04_setup_transport_nodes.yml: Creates both Edge and Host Transport nodes * 05_setup_edge_cluster.yml * setup_infra_vars.yml: The variables file To delete all objects, change the 'state' to 'absent' in the variables file and run the playbooks in the reverse order: * 05_setup_edge_cluster.yml * 04_setup_transport_nodes.yml * 03_setup_transport_node_profiles.yml * 02_setup_TEP_IP_Pools.yml * 01_setup_transport_zones.yml Validated against: * NSX-T 2.4 GA * NSX-T 2.5 GA ================================================ FILE: tests/playbooks/topologies/setup_infra/setup_infra_vars.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # # Variables file for Day-0/1 setup # Creates the following: # - 2 Transport Zones # - 1 IP Pool (used by Edge) # - 1 Transport Node Profile with 1 TZ endpoint # - 1 Edge Transport Node # - 2 ESX Host Transport Nodes # - 1 Edge Cluster with the Edge Node # { # # Flag to create or delete all the objects # Accepts: 'present' to create; 'absent' to delete # "state": "present", # # Common NSX Appliance variables # "nsx_username": "admin", "nsx_password": "myPassword1!myPassword1!", "validate_certs": False, # # First NSX appliance node. Defined separate based on the consumption. 
# Accepts both IP (IPv4) and FQDN for 'mgmt_ip' # "nsx_node1": { "hostname": "mynsx-01.mylab.local", "mgmt_ip": "10.114.200.41", "datacenter": "Datacenter", "cluster": "Management", "datastore": "datastore36", "portgroup": "VM Network" }, "transport_zones": [ { "display_name": "Overlay-TZ", "description": "NSX Configured Overlay Transport Zone", "transport_type": "OVERLAY", "host_switch_name": "nvds" }, { "display_name": "VLAN-TZ", "description": "NSX Configured VLAN Transport Zone", "transport_type": "VLAN", "host_switch_name": "nvds" } ], "ip_pools": [ { "display_name": "to-del-TEP-IP-Pool", "pool_static_subnets": [ { "display_name": "TEP_ip_pool_subnet", "state": "present", "allocation_ranges": [ { "start": "172.16.227.50", "end": "172.16.227.59" } ], "gateway_ip": "172.16.227.1", "cidr": "172.16.227.0/24" } ] } ], "transport_node_profiles": [ { "display_name": "TNP-1", "description": "Compute Transport Node Profile", "host_switches": [ { "host_switch_name": "vds7", "host_switch_id": "50 35 17 4a 3d be 59 37-57 d0 0d 03 11 ae 7e a2", "host_switch_type": "VDS", "host_switch_mode": "STANDARD", "host_switch_profiles": [ { "name": "nsx-default-uplink-hostswitch-profile", "type": "UplinkHostSwitchProfile" } ], "pnics": [], "uplinks": [ { "vds_uplink_name": "uplink1", "uplink_name": "uplink-1" }, { "vds_uplink_name": "uplink2", "uplink_name": "uplink-2" } ], "ip_assignment_spec": { "resource_type": "StaticIpPoolSpec", "ip_pool_name": "TEP-IP-Pool" }, "transport_zone_endpoints": [ { "transport_zone_name": "Overlay-TZ" } ] } ], "transport_zone_endpoints": [] } ], "transport_node_collections": [ { "state": "present", "display_name": "workload-cluster-1-collection", "description": "TNP for cluster1", "resource_type": "TransportNodeCollection", "compute_manager_name": "vcenter7", "cluster_name": "workload-cluster1", "transport_node_profile_name": "TNP-1" } ], "transport_nodes": [ { "display_name": "EdgeNode-01", "description": "NSX Edge Node 01", "host_switches": [ { 
"host_switch_name": "nvds", "host_switch_type": "NVDS", "host_switch_mode": "STANDARD", "host_switch_profiles": [ { "name": "nsx-edge-single-nic-uplink-profile", "type": "UplinkHostSwitchProfile" } ], "pnics": [ { "device_name": "fp-eth0", "uplink_name": "uplink-1" } ], "ip_assignment_spec": { "resource_type": "StaticIpPoolSpec", "ip_pool_name": "TEP-IP-Pool" }, "transport_zone_endpoints": [ { "transport_zone_name": "Overlay-TZ" } ] } ], "transport_zone_endpoints": [], "node_deployment_info": { "deployment_type": "VIRTUAL_MACHINE", "deployment_config": { "form_factor": "SMALL", "vm_deployment_config": { "vc_name": "vcenter", "vc_username": "administrator@madhu.local", "vc_password": "VMware1!", "storage": "datastore7", "compute": "Management", "host": "10.114.200.7", "data_networks": [ "VM Network", "lab-dvpg", "lab-dvpg" ], "default_gateway_addresses": [ "10.114.200.1" ], "management_network": "VM Network", "management_port_subnets": [ { "ip_addresses": [ "10.114.200.14" ], "prefix_length": 27 } ], "placement_type": "VsphereDeploymentConfig" }, "node_user_settings": { "cli_username": "admin" , "root_password": "myPassword1!myPassword1!", "cli_password": "myPassword1!myPassword1!", "audit_username": "audit", "audit_password": "myPassword1!myPassword1!" 
} }, "node_settings": { "allow_ssh_root_login": "True", "enable_ssh": "True", "hostname": "edgenode-01.madhu.local" }, "resource_type": "EdgeNode", "display_name": "EdgeNode-01" } }, { "resource_type": "TransportNode", "display_name": "esx7-12", "description": "Host Transport Node for ESXi7-12", "host_switches": [ { "host_switch_name": "nsxvswitch", "host_switch_type": "NVDS", "host_switch_mode": "STANDARD", "host_switch_profiles": [ { "name": "nsx-default-uplink-hostswitch-profile", "type": "UplinkHostSwitchProfile" } ], "pnics": [ { "device_name": "vmnic1", "uplink_name": "uplink-1" } ], "ip_assignment_spec": { "resource_type": "StaticIpPoolSpec", "ip_pool_name": "TEP-IP-Pool" }, "transport_zone_endpoints": [ { "transport_zone_name": "Overlay-TZ" } ] } ], "transport_zone_endpoints": [ ], "node_deployment_info": { "resource_type": "HostNode", "ip_addresses": ["10.114.200.12"], "os_type": "ESXI", "host_credential": { "username": "root", "password": "VMware1!" } } }, { "resource_type": "TransportNode", "display_name": "esx7-c13", "description": "Host Transport Node for esxi c13", "host_switches": [ { "host_switch_name": "nsxvswitch", "host_switch_type": "NVDS", "host_switch_mode": "STANDARD", "host_switch_profiles": [ { "name": "nsx-default-uplink-hostswitch-profile", "type": "UplinkHostSwitchProfile" } ], "pnics": [ { "device_name": "vmnic1", "uplink_name": "uplink-1" } ], "ip_assignment_spec": { "resource_type": "StaticIpPoolSpec", "ip_pool_name": "TEP-IP-Pool" }, "transport_zone_endpoints": [ { "transport_zone_name": "Overlay-TZ" } ] } ], "transport_zone_endpoints": [ ], "node_deployment_info": { "resource_type": "HostNode", "ip_addresses": ["10.114.200.13"], "os_type": "ESXI", "host_credential": { "username": "root", "password": "VMware1!" 
} } } ], "edge_clusters": [ { "display_name": "Edge-Cluster-01", "cluster_profile_bindings": [ { "profile_name": "nsx-default-edge-high-availability-profile" } ], "members": [ { "transport_node_name": "EdgeNode-01" } ] } ] } ================================================ FILE: tests/playbooks/topologies/setup_infra/setup_infra_vars_9x.yml ================================================ # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # # Variables file for Day-0/1 setup # Creates the following: # - 2 Transport Zones # - 1 IP Pool (used by Edge) # - 1 Transport Node Profile with 2 TZ endpoints # - 2 Edge Transport Nodes # - 2 ESX Host Transport Nodes # - 1 Edge Cluster with the 2 Edge Nodes # { # # Flag to create or delete all the objects # Accepts: 'present' to create; 'absent' to delete # "state": "present", # # Common NSX Appliance variables # "nsx_username": "admin", "nsx_password": "myPassword1!myPassword1!", "validate_certs": False, # # First NSX appliance node. Defined separate based on the consumption. 
# Accepts both IP (IPv4) and FQDN for 'mgmt_ip' # "nsx_node1": { "hostname": "mynsx-01.mylab.local", "mgmt_ip": "10.114.200.41", "datacenter": "Datacenter", "cluster": "Management", "datastore": "datastore36", "portgroup": "VM Network" }, "transport_zones": [ { "display_name": "Overlay-TZ", "description": "NSX Configured Overlay Transport Zone", "tz_type": "OVERLAY_BACKED" }, { "display_name": "VLAN-TZ", "description": "NSX Configured VLAN Transport Zone", "tz_type": "VLAN_BACKED" } ], "ip_pools": [ { "display_name": "TEP-IP-Pool", "pool_static_subnets": [ { "display_name": "TEP_ip_pool_subnet", "state": "present", "allocation_ranges": [ { "start": "172.16.227.50", "end": "172.16.227.59" } ], "gateway_ip": "172.16.227.1", "cidr": "172.16.227.0/24" } ] } ], "transport_node_profiles": [ { "display_name": "TNP-1", "description": "Compute Transport Node Profile", "host_switches": [ { "host_switch_name": "vds7", "host_switch_id": "50 35 17 4a 3d be 59 37-57 d0 0d 03 11 ae 7e a2", "host_switch_type": "VDS", "host_switch_mode": "STANDARD", "host_switch_profiles": [ { "name": "nsx-default-uplink-hostswitch-profile", "type": "UplinkHostSwitchProfile" } ], "pnics": [], "uplinks": [ { "vds_uplink_name": "Uplink 1", "uplink_name": "uplink-1" }, { "vds_uplink_name": "Uplink 2", "uplink_name": "uplink-2" } ], "ip_assignment_spec": { "resource_type": "StaticIpPoolSpec", "ip_pool_name": "TEP-IP-Pool" }, "transport_zone_endpoints": [ { "transport_zone_name": "Overlay-TZ" } ] } ] } ], "transport_node_collections": [ { "state": "present", "display_name": "workload-cluster-1-collection", "description": "TNP for cluster1", "resource_type": "TransportNodeCollection", "compute_manager_name": "vcenter7", "cluster_name": "workload-cluster1", "transport_node_profile_name": "TNP-1" } ], "transport_nodes": [ { "display_name": "EdgeNode-01", "description": "NSX Edge Node 01", "host_switches": [ { "host_switch_name": "nvds", "host_switch_type": "NVDS", "host_switch_mode": "STANDARD", 
"host_switch_profiles": [ { "name": "nsx-edge-single-nic-uplink-profile", "type": "UplinkHostSwitchProfile" } ], "pnics": [ { "device_name": "fp-eth0", "uplink_name": "uplink-1" } ], "ip_assignment_spec": { "resource_type": "StaticIpPoolSpec", "ip_pool_name": "TEP-IP-Pool" }, "transport_zone_endpoints": [ { "transport_zone_name": "Overlay-TZ" } ] } ], "node_deployment_info": { "deployment_type": "VIRTUAL_MACHINE", "deployment_config": { "form_factor": "SMALL", "vm_deployment_config": { "vc_name": "vcenter", "vc_username": "administrator@madhu.local", "vc_password": "VMware1!", "storage": "datastore7", "compute": "Management", "host": "10.114.200.7", "data_networks": [ "VM Network", # "lab-dvpg", # "lab-dvpg" ], "default_gateway_addresses": [ "10.114.200.1" ], "management_network": "VM Network", "management_port_subnets": [ { "ip_addresses": [ "10.114.200.14" ], "prefix_length": 27 } ], "placement_type": "VsphereDeploymentConfig" }, "node_user_settings": { "cli_username": "admin" , "root_password": "myPassword1!myPassword1!", "cli_password": "myPassword1!myPassword1!", "audit_username": "audit", "audit_password": "myPassword1!myPassword1!" 
} }, "node_settings": { "allow_ssh_root_login": "True", "enable_ssh": "True", "hostname": "edgenode-01.madhu.local" }, "resource_type": "EdgeNode", "display_name": "EdgeNode-01" } }, { "resource_type": "TransportNode", "display_name": "esx7-12", "description": "Host Transport Node for ESXi7-12", "host_switches": [ { "host_switch_name": "nsxvswitch", "host_switch_type": "NVDS", "host_switch_mode": "STANDARD", "host_switch_profiles": [ { "name": "nsx-default-uplink-hostswitch-profile", "type": "UplinkHostSwitchProfile" } ], "pnics": [ { "device_name": "vmnic1", "uplink_name": "uplink-1" } ], "ip_assignment_spec": { "resource_type": "StaticIpPoolSpec", "ip_pool_name": "TEP-IP-Pool" }, "transport_zone_endpoints": [ { "transport_zone_name": "Overlay-TZ" } ] } ], "transport_zone_endpoints": [ ], "node_deployment_info": { "resource_type": "HostNode", "ip_addresses": ["10.114.200.12"], "os_type": "ESXI", "host_credential": { "username": "root", "password": "VMware1!" } } }, { "resource_type": "TransportNode", "display_name": "esx7-c13", "description": "Host Transport Node for esxi c13", "host_switches": [ { "host_switch_name": "nsxvswitch", "host_switch_type": "NVDS", "host_switch_mode": "STANDARD", "host_switch_profiles": [ { "name": "nsx-default-uplink-hostswitch-profile", "type": "UplinkHostSwitchProfile" } ], "pnics": [ { "device_name": "vmnic1", "uplink_name": "uplink-1" } ], "ip_assignment_spec": { "resource_type": "StaticIpPoolSpec", "ip_pool_name": "TEP-IP-Pool" }, "transport_zone_endpoints": [ { "transport_zone_name": "Overlay-TZ" } ] } ], "transport_zone_endpoints": [ ], "node_deployment_info": { "resource_type": "HostNode", "ip_addresses": ["10.114.200.13"], "os_type": "ESXI", "host_credential": { "username": "root", "password": "VMware1!" 
} } } ], "edge_clusters": [ { "display_name": "Edge-Cluster-01", "cluster_profile_bindings": [ { "profile_name": "nsx-default-edge-high-availability-profile" } ], "members": [ { "transport_node_name": "EdgeNode-01" } ] } ] } ================================================ FILE: tests/playbooks/topologies/upgrade/01_upgrade_upload_mub.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Upload MUB to NSX-T Manager vmware.ansible_for_nsxt.nsxt_upgrade_upload_mub: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" url: "{{ mub_url }}" timeout: "{{ timeout }}" ================================================ FILE: tests/playbooks/topologies/upgrade/02_upgrade_accept_eula.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Accepts EULA vmware.ansible_for_nsxt.nsxt_upgrade_eula_accept: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" ================================================ FILE: tests/playbooks/topologies/upgrade/03_upgrade_uc.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Upgrade UC vmware.ansible_for_nsxt.nsxt_upgrade_uc: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" ================================================ FILE: tests/playbooks/topologies/upgrade/04_upgrade_update_plan.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Update upgrade plan for Hosts vmware.ansible_for_nsxt.nsxt_upgrade_plan: hostname: "{{ nsx_manager }}" 
username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" component_type: 'host' parallel: False pause_after_each_group: False pause_on_error: True state: 'present' - name: Update upgrade plan for Edges vmware.ansible_for_nsxt.nsxt_upgrade_plan: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" component_type: 'edge' parallel: False pause_after_each_group: False pause_on_error: True state: 'present' ================================================ FILE: tests/playbooks/topologies/upgrade/05_upgrade_update_groups.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Create upgrade groups vmware.ansible_for_nsxt.nsxt_upgrade_groups: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" display_name: "TestGroupAnsible" type: "HOST" parallel: "False" enabled: "true" upgrade_units: "{{ hosts }}" state: "present" # extended_configuration: # - key: "upgrade_mode" # value: "in_place" # - key: "rebootless_upgrade" # value: "true" extended_configuration: - key: "upgrade_mode" value: "maintenance_mode" - key: "rebootless_upgrade" value: "true" - key: "maintenance_mode_config_evacuate_powerd_off_vms" value: "false" ================================================ FILE: tests/playbooks/topologies/upgrade/06_upgrade_prechecks.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Run and abort upgrade prechecks vmware.ansible_for_nsxt.nsxt_upgrade_prechecks: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" state: 'present' ================================================ FILE: 
tests/playbooks/topologies/upgrade/07_upgrade_run.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Runs Upgrade vmware.ansible_for_nsxt.nsxt_upgrade_run: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" paused_upgrade: False ================================================ FILE: tests/playbooks/topologies/upgrade/README.md ================================================ # Upgrade NSX-T Cluster # Overview The set of playbooks in this example does a full NSX upgrade (including Host Transport Nodes and Edge Transport Nodes). The playbooks are divided based on the workflow. There are 7 main playbooks and a common variables file: * 01_upgrade_upload_mub.yml * 02_upgrade_accept_eula.yml * 03_upgrade_uc.yml * 04_upgrade_update_plan.yml * [ OPTIONAL ] 05_upgrade_update_groups.yml * 06_upgrade_prechecks.yml * 07_upgrade_run.yml * upgrade_vars.yml The following playbooks can be used to check the status of different objects during the upgrade process: check_upgrade_groups_facts.yml check_upgrade_pre_post_checks_facts.yml check_upgrade_status_summary_facts.yml To run the example, copy all the files two levels up, edit the variables file to match your needs and run the playbooks in the order listed. 
Validated against: * NSX-T 2.5 GA --> NSX-T 2.5.1 GA ================================================ FILE: tests/playbooks/topologies/upgrade/check_upgrade_groups_facts.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Get upgrade groups info vmware.ansible_for_nsxt.nsxt_upgrade_groups_facts: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" ================================================ FILE: tests/playbooks/topologies/upgrade/check_upgrade_pre_post_checks_facts.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Get upgrade pre and post checks info vmware.ansible_for_nsxt.nsxt_upgrade_pre_post_checks_facts: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" ================================================ FILE: tests/playbooks/topologies/upgrade/check_upgrade_status_summary_facts.yml ================================================ - hosts: 127.0.0.1 connection: local become: yes vars_files: - upgrade_vars.yml tasks: - name: Get upgrade status summary vmware.ansible_for_nsxt.nsxt_upgrade_status_summary_facts: hostname: "{{ nsx_manager }}" username: "{{ nsx_username }}" password: "{{ nsx_password }}" validate_certs: "{{ validate_certs }}" ================================================ FILE: tests/playbooks/topologies/upgrade/upgrade_vars.yml ================================================ # Copyright 2018 VMware, Inc. 
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # Variables file for Upgrading NSX-T: # - NSX-T Cluster # - Transport Nodes (Edge and Host) # { # # Common NSX Appliance variables # "nsx_manager": "10.114.200.11", "nsx_username": "admin", "nsx_password": "myPassword1!myPassword1!", "validate_certs": False, # Upgrade MUB "mub_url": "http://10.114.200.8/VMware-NSX-upgrade-bundle-2.5.0.1.0.14938184.mub", "timeout": 9000, # Display names of the hosts as seen by NSX which are to be upgraded hosts: [ { "host_name": "esx-c14" }, { "host_name": "esx-c15" } ] } ================================================ FILE: tests/unit/__init__.py ================================================ ================================================ FILE: tests/unit/plugins/__init__.py ================================================ ================================================ FILE: tests/unit/plugins/module_utils/__init__.py ================================================ ================================================ FILE: tests/unit/plugins/module_utils/test_nsxt_base_resource.py ================================================ #!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2018 VMware, Inc. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest import json from unittest.mock import Mock, patch from shutil import copyfile import ansible.module_utils.basic as ansible_basic import sys import os from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.policy_communicator import PolicyCommunicator import ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.nsxt_base_resource as nsxt_base_resource class SimpleDummyNSXTResource(nsxt_base_resource.NSXTBaseRealizableResource): def __init__(self): self.existing_resource_revision = 0 self.resource_class = self.__class__ self.validate_certs = False self.baseline_args = {} @staticmethod def get_resource_base_url(baseline_args=None): return 'dummy' @staticmethod def get_resource_spec(): return { "dummy": dict( required=False ) } class NestedDummyNSXTResource(nsxt_base_resource.NSXTBaseRealizableResource): def __init__(self): self.existing_resource_revision = 0 self.resource_class = self.__class__ self.validate_certs = False self.baseline_args = {} @staticmethod def get_resource_base_url(baseline_args=None): return 'dummy' @staticmethod def get_resource_spec(): return { "dummy": dict( required=False ) } class SubDummyResource1(nsxt_base_resource.NSXTBaseRealizableResource): # This one does not override get_spec_identifier def __init__(self): self.existing_resource_revision = 0 NestedDummyNSXTResource.__init__(self) @staticmethod def get_resource_update_priority(): # Will be updated first return 3 
@staticmethod def get_resource_base_url(): return 'sub_dummy1' @staticmethod def get_resource_base_url(parent_info): parent_id = parent_info.get( "NestedDummyNSXTResource_id", 'default') return '{}-sub_dummy1'.format(parent_id) @staticmethod def get_resource_spec(): return { "sub_dummy1": dict( required=True ) } def achieve_subresource_state_if_del_parent(self): # return True if the resource is to be realized with its own # specified state irrespective of the state of its parent resource. return True class SubDummyResource2(nsxt_base_resource.NSXTBaseRealizableResource): # This one overrides get_spec_identifier def __init__(self): self.existing_resource_revision = 0 NestedDummyNSXTResource.__init__(self) @staticmethod def get_resource_update_priority(): # Will be updated second return 2 def get_spec_identifier(self): return ( NestedDummyNSXTResource.SubDummyResource2. get_spec_identifier()) @classmethod def get_spec_identifier(cls): return "sub_dummy_res_2" @staticmethod def get_resource_base_url(parent_info): parent_id = parent_info.get( "NestedDummyNSXTResource_id", 'default') return '{}-sub_dummy2'.format(parent_id) @staticmethod def get_resource_spec(): return { "sub_dummy2": dict( required=True ) } class SubDummyResource3(nsxt_base_resource.NSXTBaseRealizableResource): # This one overrides get_spec_identifier # and supports creation of only 1 instance per parent resource def __init__(self): self.existing_resource_revision = 0 NestedDummyNSXTResource.__init__(self) @staticmethod def get_resource_update_priority(): # Will be updated third return 1 def get_spec_identifier(self): return ( NestedDummyNSXTResource.SubDummyResource2. 
get_spec_identifier()) @classmethod def get_spec_identifier(cls): return "sub_dummy_res_3" @classmethod def allows_multiple_resource_spec(cls): return False @staticmethod def get_resource_base_url(parent_info): parent_id = parent_info.get( "NestedDummyNSXTResource_id", 'default') return '{}-sub_dummy3'.format(parent_id) @staticmethod def get_resource_spec(): return { "sub_dummy3": dict( required=True ) } class MockAnsible(object): def __init__(self, params={}, check_mode=False): self.params = params self.check_mode = check_mode def fail_json(self, *args, **kwargs): pass def exit_json(self, *args, **kwargs): pass class NSXTBaseRealizableResourceTestCase(unittest.TestCase): def setUp(self): self.init_base_resources = nsxt_base_resource.BASE_RESOURCES return super().setUp() def tearDown(self): nsxt_base_resource.BASE_RESOURCES = self.init_base_resources return super().tearDown() @patch('ansible_collections.vmware.ansible_for_nsxt.plugins.' 'module_utils.nsxt_base_resource.PolicyCommunicator') def test_realize(self, mock_policy_communicator): init_base_resources = nsxt_base_resource.BASE_RESOURCES nsxt_base_resource.BASE_RESOURCES = {"NestedDummyNSXTResource"} nested_dummy_resource = NestedDummyNSXTResource() nested_dummy_resource.resource_class = nested_dummy_resource.__class__ mock_policy_communicator_instance = Mock() mock_policy_communicator.get_instance.return_value = ( mock_policy_communicator_instance) mock_policy_communicator_instance.request.return_value = (200, "OK") my_params = {} mock_ansible_module = MockAnsible(params=my_params) nested_dummy_resource.module = mock_ansible_module def test_create(): # when all resources state is present nonlocal my_params my_params.clear() my_params.update({ "hostname": "dummy", "username": "dummy", "password": "dummy", "nsx_cert_path": None, "nsx_key_path": None, "request_headers": None, "ca_path": None, "validate_certs": False, "state": "present", "id": "dummy", "SubDummyResource1": [ { "state": "present", "id": "dummy1" 
} ], "sub_dummy_res_2": [ { "state": "present", "id": "dummy2-1", }, { "state": "present", "id": "dummy2-2", } ], "sub_dummy_res_3": { "state": "present", "id": "dummy3", }, }) expected_exec_logs = [ { 'body': 'OK', 'resource_type': 'NestedDummyNSXTResource', 'message': ('NestedDummyNSXTResource with id dummy' ' created.'), 'changed': True, 'id': 'dummy' }, { 'body': 'OK', 'resource_type': 'SubDummyResource1', 'message': 'SubDummyResource1 with id dummy1 created.', 'changed': True, 'id': 'dummy1' }, { 'body': 'OK', 'resource_type': 'SubDummyResource2', 'message': 'SubDummyResource2 with id dummy2-1 created.', 'changed': True, 'id': 'dummy2-1' }, { 'body': 'OK', 'resource_type': 'SubDummyResource2', 'message': 'SubDummyResource2 with id dummy2-2 created.', 'changed': True, 'id': 'dummy2-2' }, { 'body': 'OK', 'resource_type': 'SubDummyResource3', 'message': 'SubDummyResource3 with id dummy3 created.', 'changed': True, 'id': 'dummy3' } ] def test_create_base_resource_first(): exec_logs = [] nested_dummy_resource.realize( successful_resource_exec_logs=exec_logs) self.assertEqual(exec_logs[0], expected_exec_logs[0]) self.assertEqual(exec_logs[1], expected_exec_logs[1]) self.assertCountEqual(exec_logs[2:4], expected_exec_logs[2:4]) self.assertEqual(exec_logs[4], expected_exec_logs[4]) def test_create_sub_resource_first(): nonlocal my_params my_params['id'] = 'dummy' my_params['create_or_update_subresource_first'] = True my_params["SubDummyResource1"][0]["id"] = "dummy1" my_params["sub_dummy_res_2"][0]["display_name"] = "dummy2-1" my_params["sub_dummy_res_2"][1]["display_name"] = "dummy2-2" my_params["sub_dummy_res_3"]["display_name"] = "dummy3" exec_logs = [] nested_dummy_resource.realize( successful_resource_exec_logs=exec_logs) self.assertEqual(exec_logs[0], expected_exec_logs[1]) self.assertCountEqual(exec_logs[1:3], expected_exec_logs[2:4]) self.assertEqual(exec_logs[3], expected_exec_logs[4]) self.assertEqual(exec_logs[4], expected_exec_logs[0]) 
test_create_base_resource_first() test_create_sub_resource_first() def test_delete(): # when all resources state is absent nonlocal my_params my_params.clear() my_params.update({ "hostname": "dummy", "username": "dummy", "password": "dummy", "nsx_cert_path": None, "nsx_key_path": None, "request_headers": None, "ca_path": None, "validate_certs": False, "state": "absent", "id": "dummy", "SubDummyResource1": [ { "state": "absent", "id": "dummy1" } ], "sub_dummy_res_2": [ { "state": "absent", "id": "dummy2-1" }, { "state": "absent", "id": "dummy2-2" }, ], "sub_dummy_res_3": { "state": "absent", "id": "dummy3" } }) expected_exec_logs = [ { 'msg': 'No SubDummyResource1 exist with id dummy1', 'changed': False, 'resource_type': 'SubDummyResource1' }, { 'msg': 'No SubDummyResource2 exist with id dummy2-1', 'changed': False, 'resource_type': 'SubDummyResource2' }, { 'msg': 'No SubDummyResource2 exist with id dummy2-2', 'changed': False, 'resource_type': 'SubDummyResource2' }, { 'msg': 'No SubDummyResource3 exist with id dummy3', 'changed': False, 'resource_type': 'SubDummyResource3' }, { 'msg': 'No NestedDummyNSXTResource exist with id dummy', 'changed': False, 'resource_type': 'NestedDummyNSXTResource' } ] def test_delete_base_resource_first(): nonlocal my_params my_params['delete_subresource_first'] = False exec_logs = [] nested_dummy_resource.realize( successful_resource_exec_logs=exec_logs) self.assertEqual(exec_logs[0], expected_exec_logs[4]) self.assertEqual(exec_logs[1], expected_exec_logs[3]) self.assertEqual(exec_logs[2:4], expected_exec_logs[1:3]) self.assertEqual(exec_logs[4], expected_exec_logs[0]) def test_delete_sub_resource_first(): nonlocal my_params my_params['display_name'] = 'dummy' del my_params['delete_subresource_first'] # SubDummyResource1_id and sub_dummy_res_2_display_name are # deleted from params. Specify them using display_name. # This also tests that user can specify either id or # display_name to identify resource. 
my_params["SubDummyResource1"][0]["display_name"] = "dummy1" my_params["sub_dummy_res_2"][0]["display_name"] = "dummy2-1" my_params["sub_dummy_res_2"][1]["display_name"] = "dummy2-2" my_params["sub_dummy_res_3"]["display_name"] = "dummy3" exec_logs = [] nested_dummy_resource.realize( successful_resource_exec_logs=exec_logs) self.assertEqual(exec_logs[0], expected_exec_logs[3]) self.assertEqual(exec_logs[1:3], expected_exec_logs[1:3]) self.assertEqual(exec_logs[3], expected_exec_logs[0]) self.assertEqual(exec_logs[4], expected_exec_logs[4]) test_delete_base_resource_first() test_delete_sub_resource_first() def test_detached_delete_parent(): # when parent is absent but child is present nonlocal my_params my_params.clear() my_params.update({ "hostname": "dummy", "username": "dummy", "password": "dummy", "nsx_cert_path": None, "nsx_key_path": None, "request_headers": None, "ca_path": None, "validate_certs": False, "state": "absent", "id": "dummy", "SubDummyResource1": [ { "state": "absent", "id": "dummy1" } ], "sub_dummy_res_2": [ { "state": "present", "id": "dummy2-1" }, { "state": "present", "id": "dummy2-2" }, ], "sub_dummy_res_3": { "state": "absent", "id": "dummy3" } }) expected_exec_logs = [ { 'msg': 'No SubDummyResource1 exist with id dummy1', 'changed': False, 'resource_type': 'SubDummyResource1' }, { 'msg': 'No SubDummyResource2 exist with id dummy2-1', 'changed': False, 'resource_type': 'SubDummyResource2' }, { 'msg': 'No SubDummyResource2 exist with id dummy2-2', 'changed': False, 'resource_type': 'SubDummyResource2' }, { 'msg': 'No SubDummyResource3 exist with id dummy3', 'changed': False, 'resource_type': 'SubDummyResource3' }, { 'msg': 'No NestedDummyNSXTResource exist with id dummy', 'changed': False, 'resource_type': 'NestedDummyNSXTResource' }, { 'body': 'OK', 'resource_type': 'SubDummyResource2', 'message': 'SubDummyResource2 with id dummy2-1 created.', 'changed': True, 'id': 'dummy2-1' }, { 'body': 'OK', 'resource_type': 'SubDummyResource2', 
'message': 'SubDummyResource2 with id dummy2-2 created.', 'changed': True, 'id': 'dummy2-2' } ] def test_without_flag_achieve_subresource_state_if_del_parent(): exec_logs = [] nested_dummy_resource.realize( successful_resource_exec_logs=exec_logs) self.assertEqual(exec_logs[0], expected_exec_logs[3]) self.assertEqual(exec_logs[1:3], expected_exec_logs[1:3]) self.assertEqual(exec_logs[3], expected_exec_logs[0]) self.assertEqual(exec_logs[4], expected_exec_logs[4]) def test_with_flag_achieve_subresource_state_if_del_parent(): nonlocal my_params my_params['achieve_subresource_state_if_del_parent'] = True # Test that user can specify either id or # display_name to identify resource. my_params['display_name'] = 'dummy' my_params["SubDummyResource1"][0]["display_name"] = "dummy1" my_params["sub_dummy_res_2"][0]["display_name"] = "dummy2-1" my_params["sub_dummy_res_2"][0][ "achieve_subresource_state_if_del_parent"] = True my_params["sub_dummy_res_2"][1]["display_name"] = "dummy2-2" my_params["sub_dummy_res_2"][1][ "achieve_subresource_state_if_del_parent"] = True my_params["sub_dummy_res_3"]["display_name"] = "dummy3" exec_logs = [] nested_dummy_resource.realize( successful_resource_exec_logs=exec_logs) print(exec_logs) self.assertEqual(exec_logs[0], expected_exec_logs[3]) self.assertEqual(exec_logs[1:3], expected_exec_logs[5:]) self.assertEqual(exec_logs[3], expected_exec_logs[0]) self.assertEqual(exec_logs[4], expected_exec_logs[4]) test_without_flag_achieve_subresource_state_if_del_parent() test_with_flag_achieve_subresource_state_if_del_parent() def test_detached_delete_child(): # when parent is present but child is absent nonlocal my_params my_params.clear() my_params.update({ "hostname": "dummy", "username": "dummy", "password": "dummy", "nsx_cert_path": None, "nsx_key_path": None, "request_headers": None, "ca_path": None, "validate_certs": False, "state": "present", "id": "dummy", "SubDummyResource1": [ { "state": "absent", "id": "dummy1" } ], "sub_dummy_res_2": [ 
{ "state": "absent", "id": "dummy2-1" }, { "state": "absent", "id": "dummy2-2" }, ], "sub_dummy_res_3": { "state": "absent", "id": "dummy3" } }) expected_exec_logs = [ { 'msg': 'No SubDummyResource1 exist with id dummy1', 'changed': False, 'resource_type': 'SubDummyResource1' }, { 'msg': 'No SubDummyResource2 exist with id dummy2-1', 'changed': False, 'resource_type': 'SubDummyResource2' }, { 'msg': 'No SubDummyResource2 exist with id dummy2-2', 'changed': False, 'resource_type': 'SubDummyResource2' }, { 'msg': 'No SubDummyResource3 exist with id dummy3', 'changed': False, 'resource_type': 'SubDummyResource3' }, { 'msg': 'No NestedDummyNSXTResource exist with id dummy', 'changed': False, 'resource_type': 'NestedDummyNSXTResource' }, { 'body': 'OK', 'resource_type': 'NestedDummyNSXTResource', 'message': ('NestedDummyNSXTResource with id dummy' ' created.'), 'changed': True, 'id': 'dummy' } ] exec_logs = [] nested_dummy_resource.realize( successful_resource_exec_logs=exec_logs) print(exec_logs) self.assertEqual(exec_logs[0], expected_exec_logs[5]) self.assertEqual(exec_logs[1], expected_exec_logs[0]) self.assertEqual(exec_logs[2:4], expected_exec_logs[1:3]) self.assertEqual(exec_logs[4], expected_exec_logs[3]) test_create() test_delete() test_detached_delete_parent() test_detached_delete_child() nsxt_base_resource.BASE_RESOURCES = init_base_resources def test_check_for_update(self): simple_dummy_resource = SimpleDummyNSXTResource() def test_with_no_existing_resource(): self.assertFalse(simple_dummy_resource.check_for_update( None, "dummy")) def test_with_same_params(): existing_params = {"dummy": "dummy"} resource_params = {"dummy": "dummy"} self.assertFalse(simple_dummy_resource.check_for_update( existing_params, resource_params)) def test_with_diff_params_simple(): existing_params = {"dummy": "dummy"} resource_params = {"dummy1": "dummy"} self.assertTrue(simple_dummy_resource.check_for_update( existing_params, resource_params)) def 
test_with_same_params_list_same_order(): existing_params = {"dummy": ["dummy1", "dummy2"]} resource_params = {"dummy": ["dummy1", "dummy2"]} self.assertFalse(simple_dummy_resource.check_for_update( existing_params, resource_params)) def test_with_same_params_list_different_order(): existing_params = {"dummy": ["dummy1", "dummy2"]} resource_params = {"dummy": ["dummy2", "dummy1"]} self.assertFalse(simple_dummy_resource.check_for_update( existing_params, resource_params)) def test_with_same_params_single_dict(): existing_params = {"dummy": {"dummy": "dummy"}} resource_params = {"dummy": {"dummy": "dummy"}} self.assertFalse(simple_dummy_resource.check_for_update( existing_params, resource_params)) def test_with_diff_params_single_dict(): existing_params = {"dummy": {"dummy": "dummy"}} resource_params = {"dummy": {"dummy1": "dummy"}} self.assertTrue(simple_dummy_resource.check_for_update( existing_params, resource_params)) existing_params = {"dummy": {"dummy": "dummy"}} resource_params = {"dummy": {"dummy": "dummy1"}} self.assertTrue(simple_dummy_resource.check_for_update( existing_params, resource_params)) existing_params = {"dummy": {"dummy": "dummy"}} resource_params = {"dummy1": {"dummy": "dummy"}} self.assertTrue(simple_dummy_resource.check_for_update( existing_params, resource_params)) def test_with_same_params_multilevel_dict(): existing_params = {"dummy": {"dummy": {"dummy": "dummy"}}} resource_params = {"dummy": {"dummy": {"dummy": "dummy"}}} self.assertFalse(simple_dummy_resource.check_for_update( existing_params, resource_params)) def test_with_diff_params_multilevel_dict(): existing_params = {"dummy": {"dummy": {"dummy": "dummy"}}} resource_params = {"dummy1": {"dummy": {"dummy": "dummy"}}} self.assertTrue(simple_dummy_resource.check_for_update( existing_params, resource_params)) existing_params = {"dummy": {"dummy": {"dummy": "dummy"}}} resource_params = {"dummy": {"dummy1": {"dummy": "dummy"}}} self.assertTrue(simple_dummy_resource.check_for_update( 
existing_params, resource_params)) existing_params = {"dummy": {"dummy": {"dummy": "dummy"}}} resource_params = {"dummy": {"dummy": {"dummy1": "dummy"}}} self.assertTrue(simple_dummy_resource.check_for_update( existing_params, resource_params)) existing_params = {"dummy": {"dummy": {"dummy": "dummy"}}} resource_params = {"dummy": {"dummy": {"dummy": "dummy1"}}} self.assertTrue(simple_dummy_resource.check_for_update( existing_params, resource_params)) test_with_no_existing_resource() test_with_same_params() test_with_diff_params_simple() test_with_same_params_list_same_order() test_with_same_params_list_different_order() test_with_same_params_single_dict() test_with_diff_params_single_dict() test_with_same_params_multilevel_dict() test_with_diff_params_multilevel_dict() def test_get_attribute(self): simple_dummy_resource = SimpleDummyNSXTResource() mock_ansible_module = MockAnsible() simple_dummy_resource.module = mock_ansible_module init_base_resources = nsxt_base_resource.BASE_RESOURCES nsxt_base_resource.BASE_RESOURCES = {"SimpleDummyNSXTResource"} resource_params = { "dummy": "dummy" } mock_ansible_module.params = resource_params expected_value = "dummy" observed_value = simple_dummy_resource.get_attribute( "dummy", resource_params) self.assertEqual(expected_value, observed_value) expected_value = (nsxt_base_resource.NSXTBaseRealizableResource. INCORRECT_ARGUMENT_NAME_VALUE) observed_value = simple_dummy_resource.get_attribute( "dummy2", resource_params) self.assertEqual(expected_value, observed_value) def test_extract_nsx_resource_params(self): simple_dummy_resource = SimpleDummyNSXTResource() resource_params = { # Note that AnsibleModule can have >= keys than the spec "dummy": "dummy", "redundant_dummy": "dummy" } expected_params = { "dummy": "dummy" } observed_params = ( simple_dummy_resource._extract_nsx_resource_params( resource_params)) self.assertEqual(expected_params, observed_params) @patch('ansible_collections.vmware.ansible_for_nsxt.plugins.' 
           'module_utils.nsxt_base_resource.PolicyCommunicator')
    def test_send_request_to_API(self, mock_policy_communicator):
        """Verify _send_request_to_API routes through the correct
        PolicyCommunicator call for base resources, sub-resources, and
        get-all queries, and propagates request failures."""
        mock_policy_communicator.request.return_value = (200, "OK")
        mock_policy_communicator.get_all_results.return_value = (200, "OK")
        # Save and override the module-level registry so the dummy class is
        # treated as a base resource; restored at the end of the test.
        init_base_resources = nsxt_base_resource.BASE_RESOURCES
        nsxt_base_resource.BASE_RESOURCES = {"NestedDummyNSXTResource"}

        # Test get all resources
        # No suffix -> expect get_all_results(), not request().
        nested_dummy_resource = NestedDummyNSXTResource()
        nested_dummy_resource.policy_communicator = mock_policy_communicator
        nested_dummy_resource.validate_certs = False
        nested_dummy_resource.resource_class = nested_dummy_resource.__class__
        nested_dummy_resource._send_request_to_API()
        self.assertEqual(
            mock_policy_communicator.get_all_results.call_count, 1)
        self.assertEqual(mock_policy_communicator.request.call_count, 0)

        # Test Base Resource
        # With a suffix -> a single request() call is expected.
        mock_policy_communicator.reset_mock()
        nested_dummy_resource = NestedDummyNSXTResource()
        nested_dummy_resource.policy_communicator = mock_policy_communicator
        nested_dummy_resource.validate_certs = False
        nested_dummy_resource.resource_class = nested_dummy_resource.__class__
        nested_dummy_resource._send_request_to_API(suffix="dummy")
        self.assertEqual(mock_policy_communicator.request.call_count, 1)

        # Test Sub-Resource
        # A sub-resource needs _parent_info to build its URL; still one
        # request() call.
        mock_policy_communicator.reset_mock()
        nested_subdummy_resource1 = (
            NestedDummyNSXTResource.SubDummyResource1())
        nested_subdummy_resource1.policy_communicator = (
            mock_policy_communicator)
        nested_subdummy_resource1.validate_certs = False
        nested_subdummy_resource1.resource_class = (
            nested_subdummy_resource1.__class__)
        nested_subdummy_resource1._parent_info = {
            "NestedDummyNSXTResource_id": "dummy"
        }
        nested_subdummy_resource1._send_request_to_API(suffix="dummy")
        self.assertEqual(mock_policy_communicator.request.call_count, 1)

        # Test when request throws exception
        with self.assertRaises(Exception):
            mock_policy_communicator.request = Mock()
            mock_policy_communicator.request.raiseError.side_effect = Mock(
                side_effect=Exception)
            nested_dummy_resource = NestedDummyNSXTResource()
            nested_dummy_resource.policy_communicator = (
                mock_policy_communicator)
            nested_dummy_resource.validate_certs = False
            nested_dummy_resource.resource_class = (
                nested_dummy_resource.__class__)
            nested_dummy_resource._send_request_to_API(suffix="dummy")
            self.assertEqual(mock_policy_communicator.request.call_count, 1)
            self.assertEqual(
                mock_policy_communicator.get_all_results.call_count, 0)

        # Restore the registry mutated at the top of the test.
        nsxt_base_resource.BASE_RESOURCES = init_base_resources

    @patch('ansible_collections.vmware.ansible_for_nsxt.plugins.'
           'module_utils.nsxt_base_resource.PolicyCommunicator')
    def test_achieve_present_state(self, mock_policy_communicator):
        """Verify _achieve_present_state: no-op when params are unchanged,
        update when they differ, and create when no resource exists; each
        scenario checks the exec_logs entry it appends."""
        init_base_resources = nsxt_base_resource.BASE_RESOURCES
        nsxt_base_resource.BASE_RESOURCES = {"SimpleDummyNSXTResource"}
        simple_dummy_resource = SimpleDummyNSXTResource()
        simple_dummy_resource.id = "dummy"
        simple_dummy_resource.policy_communicator = mock_policy_communicator
        simple_dummy_resource.module = MockAnsible()
        simple_dummy_resource.existing_resource = {
            "_revision": 1
        }

        def test_when_resource_not_updated():
            # Empty desired params == existing -> no REST call, "already
            # exists" log entry with changed=False.
            simple_dummy_resource.nsx_resource_params = {}
            exec_logs = []
            # mock_policy_communicator.request.return_value = None
            simple_dummy_resource._achieve_present_state(exec_logs)
            self.assertEqual(mock_policy_communicator.request.call_count, 0)
            expected_exec_logs = [
                {
                    "changed": False,
                    "id": simple_dummy_resource.id,
                    "message": "%s with id %s already exists."
                               % (simple_dummy_resource.__class__.__name__,
                                  simple_dummy_resource.id),
                    "resource_type": simple_dummy_resource.__class__.__name__
                }
            ]
            self.assertEqual(exec_logs, expected_exec_logs)

        # Running tally of expected request() calls across the nested
        # scenarios below; each scenario increments it after asserting.
        policy_communicator_request_call_num = 2

        def test_when_resource_updated(is_created=False):
            def test_when_policy_request_succeeds():
                nonlocal policy_communicator_request_call_num
                simple_dummy_resource.nsx_resource_params = {
                    "dummy": "dummy"
                }
                simple_dummy_resource.resource_params = {}
                exec_logs = []
                # First reply: the PATCH itself; second: re-read returning
                # the new revision.
                mock_policy_communicator.request.side_effect = [
                    (200, "OK"),
                    (200, {
                        "_revision": 1
                    })
                ]
                simple_dummy_resource._achieve_present_state(exec_logs)
                self.assertEqual(mock_policy_communicator.request.call_count,
                                 policy_communicator_request_call_num)
                policy_communicator_request_call_num += 1
                # Message differs depending on whether the resource was
                # created (no existing_resource) or updated.
                if is_created:
                    expected_message = ("%s with id %s created." %
                                        (simple_dummy_resource.__class__.
                                         __name__, simple_dummy_resource.id))
                else:
                    expected_message = ("%s with id %s updated." %
                                        (simple_dummy_resource.__class__.
                                         __name__, simple_dummy_resource.id))
                expected_exec_logs = [
                    {
                        "changed": True,
                        "id": simple_dummy_resource.id,
                        "body": "OK",
                        "message": expected_message,
                        "resource_type": (
                            simple_dummy_resource.__class__.__name__)
                    }
                ]
                self.assertEqual(exec_logs, expected_exec_logs)

            def test_when_policy_request_fails():
                nonlocal policy_communicator_request_call_num
                simple_dummy_resource.nsx_resource_params = {
                    "dummy": "dummy"
                }
                exec_logs = []
                mock_policy_communicator.request.return_value = Mock(
                    side_effect=Exception)
                simple_dummy_resource._achieve_present_state(exec_logs)
                self.assertEqual(
                    mock_policy_communicator.request.call_count,
                    policy_communicator_request_call_num)
                policy_communicator_request_call_num += 1

            test_when_policy_request_succeeds()
            test_when_policy_request_fails()
            nonlocal policy_communicator_request_call_num

        def test_create_new_resource():
            # No existing resource -> the update path reports "created".
            simple_dummy_resource.existing_resource = None
            test_when_resource_updated(is_created=True)

        test_when_resource_not_updated()
        test_when_resource_updated()
        test_create_new_resource()

        # Restore the registry mutated at the top of the test.
        nsxt_base_resource.BASE_RESOURCES = init_base_resources

    @patch('ansible_collections.vmware.ansible_for_nsxt.plugins.'
           'module_utils.nsxt_base_resource.PolicyCommunicator')
    def test_achieve_absent_state(self, mock_policy_communicator):
        """Verify _achieve_absent_state logs a deletion, logs nothing when
        the delete request fails, and logs a no-op when the resource does
        not exist."""
        init_base_resources = nsxt_base_resource.BASE_RESOURCES
        nsxt_base_resource.BASE_RESOURCES = {"SimpleDummyNSXTResource"}
        simple_dummy_resource = SimpleDummyNSXTResource()
        simple_dummy_resource.id = "dummy"
        simple_dummy_resource.policy_communicator = mock_policy_communicator
        simple_dummy_resource.module = MockAnsible()

        def test_when_resource_exists_but_policy_request_fails():
            # A failing DELETE must not append anything to exec_logs.
            simple_dummy_resource.existing_resource = {}
            mock_policy_communicator.request.return_value = Mock(
                side_effect=Exception)
            exec_logs = []
            expected_exec_logs = []
            simple_dummy_resource._achieve_absent_state(exec_logs)
            self.assertEqual(exec_logs, expected_exec_logs)

        def test_when_resource_exists_and_policy_request_succeeds():
            simple_dummy_resource.existing_resource = {}
            mock_policy_communicator.request.side_effect = [
                (200, "OK"),
                Mock(side_effect=Exception)
            ]
            exec_logs = []
            expected_exec_logs = [
                {
                    "changed": True,
                    "id": simple_dummy_resource.id,
                    "message": "%s with id %s deleted."
                               % (simple_dummy_resource.__class__.__name__,
                                  simple_dummy_resource.id)
                }
            ]
            simple_dummy_resource._achieve_absent_state(exec_logs)
            self.assertEqual(exec_logs, expected_exec_logs)

        def test_when_resource_does_not_exist():
            # Nothing to delete -> changed=False log entry.
            simple_dummy_resource.existing_resource = None
            exec_logs = []
            expected_exec_logs = [
                {
                    "changed": False,
                    "msg": 'No %s exist with id %s'
                           % (simple_dummy_resource.__class__.__name__,
                              simple_dummy_resource.id),
                    "resource_type": simple_dummy_resource.__class__.__name__
                }
            ]
            simple_dummy_resource._achieve_absent_state(exec_logs)
            self.assertEqual(exec_logs, expected_exec_logs)

        test_when_resource_exists_but_policy_request_fails()
        test_when_resource_exists_and_policy_request_succeeds()
        test_when_resource_does_not_exist()

        # Restore the registry mutated at the top of the test.
        nsxt_base_resource.BASE_RESOURCES = init_base_resources

    def test_get_sub_resources_class_of(self):
        """_get_sub_resources_class_of must yield every nested sub-resource
        class of the given resource class (order-insensitive)."""
        nested_dummy_resource = NestedDummyNSXTResource()
        expected_values = [NestedDummyNSXTResource.SubDummyResource1,
                           NestedDummyNSXTResource.SubDummyResource2,
                           NestedDummyNSXTResource.SubDummyResource3]
        observed_values = list(
            nested_dummy_resource._get_sub_resources_class_of(
                nested_dummy_resource.__class__))
        self.assertCountEqual(expected_values, observed_values)

    def test_fill_missing_resource_params(self):
        """_fill_missing_resource_params must copy keys absent from
        resource_params out of existing_params — recursively for nested
        dicts — while never overwriting values the caller supplied.
        Covered shapes: flat dict, nested dict, list values, and a mix."""
        simple_dummy_resource = SimpleDummyNSXTResource()

        def test_simple():
            def test_overwrite():
                # Caller-supplied value wins; existing value is ignored.
                existing_params = {
                    "dummy": "dummy"
                }
                resource_params = expected_resource_params = {
                    "dummy": "new_dummy"
                }
                simple_dummy_resource._fill_missing_resource_params(
                    existing_params, resource_params)
                self.assertEqual(resource_params, expected_resource_params)

            def test_missing():
                # Key only present in existing_params is copied over.
                existing_params = {
                    "dummy": "dummy"
                }
                resource_params = {
                    "dummy1": "dummy1"
                }
                expected_resource_params = {
                    "dummy": "dummy",
                    "dummy1": "dummy1"
                }
                simple_dummy_resource._fill_missing_resource_params(
                    existing_params, resource_params)
                self.assertEqual(resource_params, expected_resource_params)

            test_overwrite()
            test_missing()

        def test_with_dict():
            def test_overwrite():
                existing_params = {
                    "dummy": "dummy",
                    "dummy1": {
                        "dummy2": "dummy2"
                    }
                }
                resource_params = expected_resource_params = {
                    "dummy": "dummy",
                    "dummy1": {
                        "dummy2": "dummy3"
                    }
                }
                simple_dummy_resource._fill_missing_resource_params(
                    existing_params, resource_params)
                self.assertEqual(resource_params, expected_resource_params)

            def test_missing():
                # Missing nested keys are merged in recursively.
                existing_params = {
                    "dummy": "dummy",
                    "dummy1": {
                        "dummy2": "dummy2",
                        "dummy3": {
                            "dummy4": "dummy4"
                        }
                    },
                    "dummy5": {
                        "dummy6": "dummy6"
                    }
                }
                resource_params = {
                    "dummy": "dummy1",
                    "dummy1": {
                        "dummy2": "dummy2"
                    }
                }
                expected_resource_params = {
                    "dummy": "dummy1",
                    "dummy1": {
                        "dummy2": "dummy2",
                        "dummy3": {
                            "dummy4": "dummy4"
                        }
                    },
                    "dummy5": {
                        "dummy6": "dummy6"
                    }
                }
                simple_dummy_resource._fill_missing_resource_params(
                    existing_params, resource_params)
                self.assertEqual(resource_params, expected_resource_params)

            test_overwrite()
            test_missing()

        def test_with_list():
            def test_overwrite():
                # A caller-supplied list is kept as-is, not merged.
                existing_params = {
                    "dummy": "dummy",
                    "dummy1": ["dummy1"]
                }
                resource_params = expected_resource_params = {
                    "dummy": "dummy",
                    "dummy1": ["dummy2"]
                }
                simple_dummy_resource._fill_missing_resource_params(
                    existing_params, resource_params)
                self.assertEqual(resource_params, expected_resource_params)

            def test_missing():
                existing_params = {
                    "dummy": "dummy",
                    "dummy1": ["dummy1"]
                }
                resource_params = {
                    "dummy": "dummy1"
                }
                expected_resource_params = {
                    "dummy": "dummy1",
                    "dummy1": ["dummy1"]
                }
                simple_dummy_resource._fill_missing_resource_params(
                    existing_params, resource_params)
                self.assertEqual(resource_params, expected_resource_params)

            test_overwrite()
            test_missing()

        def test_with_dict_and_list():
            def test_overwrite():
                existing_params = {
                    "dummy": "dummy",
                    "dummy1": ["dummy1"],
                    "dummy2": {
                        "dummy3": ["dummy3"]
                    }
                }
                resource_params = expected_resource_params = {
                    "dummy": "dummy",
                    "dummy1": ["dummy2"],
                    "dummy2": {
                        "dummy3": ["dummy4"]
                    }
                }
                simple_dummy_resource._fill_missing_resource_params(
                    existing_params, resource_params)
                self.assertEqual(resource_params,
                                 expected_resource_params)

            def test_missing():
                existing_params = {
                    "dummy": "dummy",
                    "dummy1": ["dummy1"],
                    "dummy2": {
                        "dummy3": ["dummy3"]
                    }
                }
                resource_params = {
                    "dummy": "dummy",
                    "dummy1": ["dummy2"],
                    "dummy2": {
                        "dummy4": "dummy4"
                    }
                }
                expected_resource_params = {
                    "dummy": "dummy",
                    "dummy1": ["dummy2"],
                    "dummy2": {
                        "dummy3": ["dummy3"],
                        "dummy4": "dummy4"
                    }
                }
                simple_dummy_resource._fill_missing_resource_params(
                    existing_params, resource_params)
                self.assertEqual(resource_params, expected_resource_params)

            test_overwrite()
            test_missing()

        test_simple()
        test_with_dict()
        test_with_list()
        test_with_dict_and_list()


================================================
FILE: tests/unit/plugins/module_utils/test_policy_communicator.py
================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import json
from unittest.mock import Mock, patch

from ansible_collections.vmware.ansible_for_nsxt.plugins.module_utils.policy_communicator import PolicyCommunicator

from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


class PolicyCommunicatorTestCase(unittest.TestCase):
    """Unit tests for PolicyCommunicator: instance caching via
    get_instance() and the success/failure paths of request()."""

    def setUp(self):
        # One communicator, keyed on dummy credentials, shared by the
        # request() tests below.
        self.policy_communicator = PolicyCommunicator.get_instance(
            "dummy", "dummy", "dummy")

    @staticmethod
    def _fake_http_response(status_code, body):
        # Mock mimicking a urllib response object:
        # getcode() -> status_code, read().decode() -> body.
        fake = Mock()
        fake.getcode.return_value = status_code
        fake.read.return_value.decode.return_value = body
        return fake

    def test_get_instance_with_same_credentials(self):
        """Identical credentials must resolve to the same instance."""
        first = PolicyCommunicator.get_instance("dummy1", "dummy1", "dummy1")
        second = PolicyCommunicator.get_instance("dummy1", "dummy1", "dummy1")
        self.assertEqual(first, second)

    def test_get_instance_with_different_credentials(self):
        """Different credentials must resolve to distinct instances."""
        first = PolicyCommunicator.get_instance("dummy1", "dummy1", "dummy1")
        second = PolicyCommunicator.get_instance("dummy2", "dummy2", "dummy2")
        self.assertNotEqual(first, second)

    @patch("ansible_collections.vmware.ansible_for_nsxt.plugins."
           "module_utils.policy_communicator.open_url")
    def test_request_success_policy_response_with_success(self,
                                                          mock_open_url):
        """A 200 reply with a JSON body is returned parsed into a dict."""
        raw_body = '{"dummy": "dummy"}'
        mock_open_url.return_value = self._fake_http_response(200, raw_body)
        rc, response = self.policy_communicator.request("dummy")
        self.assertEqual(rc, 200)
        self.assertEqual(response, json.loads(raw_body))

    @patch("ansible_collections.vmware.ansible_for_nsxt.plugins."
           "module_utils.policy_communicator.open_url")
    def test_request_success_policy_response_with_none(self, mock_open_url):
        """An HTTPError carrying code 200 and an empty body yields
        (200, None) instead of raising."""
        mock_open_url.side_effect = HTTPError(
            url="dummy", code=200, msg=None,
            fp=self._fake_http_response(200, None), hdrs=None)
        rc, response = self.policy_communicator.request("dummy")
        self.assertEqual(rc, 200)
        self.assertEqual(response, None)

    @patch("ansible_collections.vmware.ansible_for_nsxt.plugins."
           "module_utils.policy_communicator.open_url")
    def test_request_success_policy_response_with_error(self, mock_open_url):
        """An HTTPError whose body reports an NSX error_code must raise."""
        error_body = '{"error_code": "5000212"}'
        mock_open_url.side_effect = HTTPError(
            url="dummy", code="dummy", msg=None,
            fp=self._fake_http_response(200, error_body), hdrs=None)
        with self.assertRaises(Exception):
            self.policy_communicator.request("dummy")

    @patch("ansible_collections.vmware.ansible_for_nsxt.plugins."
           "module_utils.policy_communicator.open_url")
    def test_request_failure(self, mock_open_url):
        """A non-success HTTP status code from the policy API must raise."""
        mock_open_url.return_value = self._fake_http_response(
            400, '{"dummy": "dummy"}')
        with self.assertRaises(Exception):
            self.policy_communicator.request("dummy")