Repository: escapecloud/cloudexit Branch: main Commit: 290c8ae9026d Files: 39 Total size: 299.7 KB Directory structure: gitextract_4srv3mcm/ ├── .github/ │ └── workflows/ │ └── pr-checks.yml ├── .gitignore ├── LICENSE ├── README.md ├── assets/ │ ├── css/ │ │ └── style.css │ └── template/ │ └── index.html ├── config/ │ ├── aws_example.json │ └── azure_example.json ├── config.py ├── core/ │ ├── __init__.py │ ├── engine.py │ ├── utils.py │ ├── utils_aws.py │ ├── utils_azure.py │ ├── utils_db.py │ ├── utils_report.py │ ├── utils_report_common.py │ ├── utils_report_html.py │ ├── utils_report_json.py │ ├── utils_report_pdf.py │ └── utils_sync.py ├── main.py ├── publiccode.yml ├── requirements-dev.txt ├── requirements.txt ├── tests/ │ ├── __init__.py │ ├── report_fixtures.py │ ├── test_report_pipeline.py │ ├── test_report_transforms.py │ ├── test_utils_and_main.py │ └── test_validate.py └── utils/ ├── aws.py ├── azure.py ├── connection.py ├── constants.py ├── data.py ├── sync.py ├── utils.py └── validate.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/pr-checks.yml ================================================ name: PR Checks on: pull_request: types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read env: PYTHON_VERSION: "3.14" jobs: check-linting: if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - name: Set up Python uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: pip - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements-dev.txt - name: Ruff run: ruff check main.py core utils tests check-types: if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - name: Set up Python uses: actions/setup-python@v6 with: python-version: 
${{ env.PYTHON_VERSION }} cache: pip - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements-dev.txt - name: Black run: black --check main.py core utils tests check-tests: if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - name: Set up Python uses: actions/setup-python@v6 with: python-version: ${{ env.PYTHON_VERSION }} cache: pip - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r requirements-dev.txt - name: Unit tests run: python -m unittest discover -s tests ================================================ FILE: .gitignore ================================================ # Created by https://www.toptal.com/developers/gitignore/api/macos,windows,linux,visualstudiocode,python,node # Edit at https://www.toptal.com/developers/gitignore?templates=macos,windows,linux,visualstudiocode,python,node ### Linux ### *~ # temporary files which can be created if a process still has a handle open of a deleted file .fuse_hidden* # KDE directory preferences .directory # Linux trash folder which might appear on any partition or disk .Trash-* # .nfs files are created when an open file is removed but is still being accessed .nfs* ### macOS ### # General .DS_Store .AppleDouble .LSOverride # Icon must end with two \r Icon # Thumbnails ._* # Files that might appear in the root of a volume .DocumentRevisions-V100 .fseventsd .Spotlight-V100 .TemporaryItems .Trashes .VolumeIcon.icns .com.apple.timemachine.donotpresent # Directories potentially created on remote AFP share .AppleDB .AppleDesktop Network Trash Folder Temporary Items .apdisk ### macOS Patch ### # iCloud generated files *.icloud ### Node ### # Logs logs *.log npm-debug.log* yarn-debug.log* yarn-error.log* lerna-debug.log* .pnpm-debug.log* # Diagnostic reports (https://nodejs.org/api/report.html) report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json # Runtime data pids *.pid *.seed *.pid.lock # Directory for 
instrumented libs generated by jscoverage/JSCover lib-cov # Coverage directory used by tools like istanbul coverage *.lcov # nyc test coverage .nyc_output # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) .grunt # Bower dependency directory (https://bower.io/) bower_components # node-waf configuration .lock-wscript # Compiled binary addons (https://nodejs.org/api/addons.html) build/Release # Dependency directories node_modules/ jspm_packages/ # Snowpack dependency directory (https://snowpack.dev/) web_modules/ # Generated reports reports/ # Local configuration files config/aws.json config/azure.json # TypeScript cache *.tsbuildinfo # Optional npm cache directory .npm # Optional eslint cache .eslintcache # Optional stylelint cache .stylelintcache # Microbundle cache .rpt2_cache/ .rts2_cache_cjs/ .rts2_cache_es/ .rts2_cache_umd/ # Optional REPL history .node_repl_history # Output of 'npm pack' *.tgz # Yarn Integrity file .yarn-integrity # dotenv environment variable files .env .env.development.local .env.test.local .env.production.local .env.local # parcel-bundler cache (https://parceljs.org/) .cache .parcel-cache # Next.js build output .next out # Nuxt.js build / generate output .nuxt dist # Gatsby files .cache/ # Comment in the public line if your project uses Gatsby and not Next.js # https://nextjs.org/blog/next-9-1#public-directory-support # public # vuepress build output .vuepress/dist # vuepress v2.x temp and cache directory .temp # Docusaurus cache and generated files .docusaurus # Serverless directories .serverless/ # FuseBox cache .fusebox/ # DynamoDB Local files .dynamodb/ # TernJS port file .tern-port # Stores VSCode versions used for testing VSCode extensions .vscode-test # yarn v2 .yarn/cache .yarn/unplugged .yarn/build-state.yml .yarn/install-state.gz .pnp.* ### Node Patch ### # Serverless Webpack directories .webpack/ # Optional stylelint cache # SvelteKit build / generate output .svelte-kit ### Python ### # 
Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ cover/ # Translations *.mo *.pot # Django stuff: local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder .pybuilder/ target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control #poetry.lock # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
#pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. # https://pdm.fming.dev/#use-with-ide .pdm.toml # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # pytype static type analyzer .pytype/ # Cython debug symbols cython_debug/ # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
#.idea/ ### VisualStudioCode ### .vscode/* !.vscode/settings.json !.vscode/tasks.json !.vscode/launch.json !.vscode/extensions.json !.vscode/*.code-snippets # Local History for Visual Studio Code .history/ # Built Visual Studio Code Extensions *.vsix ### VisualStudioCode Patch ### # Ignore all local history of files .history .ionide ### Windows ### # Windows thumbnail cache files Thumbs.db Thumbs.db:encryptable ehthumbs.db ehthumbs_vista.db # Dump file *.stackdump # Folder config file [Dd]esktop.ini # Recycle Bin used on file shares $RECYCLE.BIN/ # Windows Installer files *.cab *.msi *.msix *.msm *.msp # Windows shortcuts *.lnk # End of https://www.toptal.com/developers/gitignore/api/macos,windows,linux,visualstudiocode,python,node output/ logs/ *.xlsx ================================================ FILE: LICENSE ================================================ GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. 
"This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. 
"Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. 
Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>. ================================================ FILE: README.md ================================================ ![CloudExit](./docs/images/Main.png) # cloudexit – Cloud Exit Assessment (Open Source) cloudexit is an open-source tool that helps cloud engineers and technical teams assess **cloud exit readiness**. It provides a structured, repeatable way to understand: - what cloud services are in use - where vendor lock-in risks exist - how difficult an exit scenario would be - what alternative technologies are available cloudexit runs **locally by default**, with no account required. --- ## How cloudexit fits into the EscapeCloud ecosystem cloudexit is the **Community / Open Source edition** of the EscapeCloud ecosystem. 
- **cloudexit (this repository)** Open-source, offline-first assessment engine - **exitcloud.io** Lightweight Cloud Exit Readiness Platform for individuals, SMEs, and MSPs - **escapecloud.io** Enterprise Cloud Exit Readiness Platform with advanced reporting and governance cloudexit can be used: - fully offline (Basic assessment) - or connected to a platform (exitcloud.io / escapecloud.io) for richer reports and scoring --- ## Documentation 📘 **Full documentation:** 👉 https://cloudexit.escapecloud.io The documentation covers: - getting started and prerequisites - running assessments - cloud providers and permissions - reports and scores - connected mode (exitcloud.io / escapecloud.io) - troubleshooting and contribution guidelines --- ## License cloudexit is licensed under the **GNU Affero General Public License v3 (AGPL-3.0)** See the [LICENSE](https://www.gnu.org/licenses/agpl-3.0.html) file for details. --- ## Contributing Contributions are welcome. You can contribute by: - reporting issues - improving documentation - submitting pull requests Please see the documentation for contribution guidelines. 
================================================ FILE: assets/css/style.css ================================================
/* ========================================================
   Base: Variables
======================================================== */
:root {
  /* Blue */
  --blue-100: #dbe6fe;
  --blue-800: #1e4baf;
  /* Green */
  --green-100: #dcfce7;
  --green-600: #16a34a;
  --green-700: #047854;
  /* Neutral */
  --neutral-50: #f9fbfb;
  --neutral-100: #f3f6f6;
  --neutral-200: #e5ebeb;
  --neutral-300: #d1dbdb;
  --neutral-400: #9cafae;
  --neutral-600: #4b6361;
  /* Added: --neutral-700 is referenced later in this file (.chart-empty-state p,
     .tags span) but was never defined, so those color declarations were invalid
     at computed-value time and fell back to inherited colors. Value interpolated
     between the 600 and 800 shades — TODO confirm against the design token set. */
  --neutral-700: #354d4b;
  --neutral-800: #1f3735;
  --neutral-900: #112726;
  /* Primary */
  --primary-600: #0d948b;
  --primary-800: #115e59;
  --primary-950: #042f2c;
  /* Red */
  --red-50: #fef2f2;
  --red-100: #fee2e2;
  --red-700: #b91c1c;
  --red-800: #991b1b;
  /* Yellow */
  --yellow-100: #fee4c7;
  --yellow-850: #92400e;
  /* Color Palette */
  --white: #fff;
  /* Transition Defaults */
  --transition-speed: 0.3s;
  /* Font Sizes */
  --text-heading-2: 24px;
  --text-heading-3: 20px;
  --text-body: 16px;
  --text-label: 14px;
  --text-label-small: 12px;
  /* Radius */
  --rounded-md: 12px;
  --rounded-sm: 8px;
  --rounded-xs: 4px;
  /* Sidebar */
}
/* ========================================================
   Base: Reset
======================================================== */
* {
  margin: 0;
  padding: 0;
  -webkit-box-sizing: border-box;
  box-sizing: border-box;
}
body {
  font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif;
  line-height: 1.375;
  font-weight: 400;
  color: var(--neutral-900);
  background: var(--neutral-100);
  text-rendering: optimizeLegibility;
  -webkit-font-smoothing: antialiased;
  -moz-font-smoothing: antialiased;
}
img {
  max-width: 100%;
  height: auto;
}
ul {
  list-style: none;
  padding: 0;
  margin: 0;
}
.divider-border {
  height: 1px;
  background: var(--neutral-200);
}
.container {
}
/* ========================================================
   Base: Typography
======================================================== */
a {
  text-decoration: none;
-webkit-transition: all var(--transition-speed) ease; -o-transition: all var(--transition-speed) ease; transition: all var(--transition-speed) ease; } a:hover { text-decoration: none !important; } h2, h3 { color: var(--neutral-900); font-weight: 500; } h2, h3, li, p { margin: 0; } h2 { font-size: var(--text-heading-2); line-height: 1.33; } h3 { font-size: var(--text-heading-3); line-height: 1.4; } body, p { font-size: var(--text-body); line-height: 1.375; } .label, label { font-size: var(--text-label); line-height: 1.43; } .small, label.small { font-size: var(--text-label-small); line-height: 1.33; } .bg-white { background: var(--white); } a*:active, a*:focus { outline: 0; border: 0; } main { padding-bottom: 32px; padding-top: 88px; } /* Scrollbars */ ::-webkit-scrollbar { width: 5px; border-radius: var(--rounded-sm); } ::-webkit-scrollbar-track { background: var(--neutral-100); border-radius: var(--rounded-sm); } ::-webkit-scrollbar-thumb { background: var(--primary-600); border-radius: var(--rounded-sm); } ::-webkit-scrollbar-thumb:hover { background: var(--primary-800); border-radius: var(--rounded-sm); } .dropdown-item span { font-size: var(--text-label); font-weight: 400; color: var(--neutral-900); } .dropdown-item { padding-top: 8px; padding-bottom: 8px; } .dropdown-menu { padding: 0; } .dropdown-menu li a { line-height: 1.5; } /* ======================================================== Layout: Shared Cards And Charts ======================================================== */ .chart-card-head { padding: 16px 24px; } .chart-card-head h3 { margin-bottom: 5px; color: var(--neutral-900); } .chart-card-head h6 { color: var(--neutral-600); font-size: var(--text-label); font-weight: 400; } .chart-card-head h6 span { display: inline-block; font-weight: 500; } .risk-header { display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-pack: justify; -ms-flex-pack: justify; justify-content: space-between; -webkit-box-align: center; -ms-flex-align: center; 
align-items: center; /* padding: 16px 24px; */ } .risk-dashboard { display: flex; flex-direction: column; height: 100%; } .risk-count h2 { color: var(--neutral-600); font-size: var(--text-label); font-weight: 400; } .risk-count .count { font-size: var(--text-heading-3); font-weight: 500; color: var(--neutral-900); margin: 0; } .chart-container { display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-pack: center; -ms-flex-pack: center; justify-content: center; -webkit-box-align: center; -ms-flex-align: center; align-items: center; flex: 1; } .chart-box { height: 350px; } .chart-empty-state { flex: 1; min-height: 220px; display: flex; align-items: center; justify-content: center; text-align: center; } .chart-empty-state-inner { display: flex; flex-direction: column; align-items: center; gap: 10px; } .chart-empty-state i { font-size: 28px; line-height: 1; color: var(--neutral-900); } .chart-empty-state p { margin: 0; color: var(--neutral-700); } .alt-tech-empty-state { min-height: 260px; } .scoring-empty-state { min-height: 300px; } .scoring-card { overflow: hidden; } .scoring-inner { display: flex; justify-content: center; align-items: center; height: 100%; min-height: 300px; } .chart-wrapper { max-width: 350px; overflow: hidden; position: relative; border-radius: 12px; mask-image: linear-gradient(to bottom, black 85%, transparent 100%); -webkit-mask-image: linear-gradient(to bottom, black 85%, transparent 100%); } .scoring { width: 100%; max-width: 420px; height: 320px !important; margin: 0 auto; } .form-title { font-size: var(--text-heading-3); font-weight: 500; line-height: 1.33; color: var(--neutral-900); } .shadow-s { -webkit-box-shadow: 0 1px 4px 0 rgba(0, 0, 0, 0.1); box-shadow: 0 1px 4px 0 rgba(0, 0, 0, 0.1); } .input { border: 1px solid var(--neutral-300); } /* ======================================================== Components: Buttons ======================================================== */ .dropdown-toggle::after { float: right; 
margin-top: 8px; } /* ======================================================== Components: Summary, Scoring, Resources, Alt Tech Cards ======================================================== */ ::-webkit-input-placeholder { color: var(--neutral-600); font-size: var(--text-body); font-style: normal; font-weight: 400; line-height: 38px; } ::-moz-placeholder { color: var(--neutral-600); font-size: var(--text-body); font-style: normal; font-weight: 400; line-height: 38px; } .resource-card { border: 1px solid var(--neutral-200); } .resource-card h3 { font-weight: 500; min-height: 44px; font-size: var(--text-label); font-size: 16px; font-weight: 500; font-stretch: normal; line-height: 1.38; color: var(--neutral-900); } .resource-card h6 { font-size: var(--text-label); font-weight: 400; margin: 0; } .resource-card { height: 100%; } .resource-card p { margin-top: 16px; margin-bottom: 16px; } .resource-card img { max-width: 100%; margin: 0 0 16px; height: 32px; } .gapy-3 { gap: 16px 0; } .alttech-card { border: 1px solid var(--neutral-200); height: 100%; } .alttech-title { } .alttech-title h5 { font-size: var(--text-body); margin: 0; color: var(--neutral-900); } .alttech-title h6 { font-size: var(--text-label-small); font-weight: 400; } .green-700 { color: var(--green-700); } .alttech-text p { /*overflow: hidden;*/ /*display: -webkit-box;*/ font-size: 14px; -webkit-line-clamp: 2; -webkit-box-orient: vertical; } .sync-status { gap: 28px 0; } .sync-status-text { } .sync-status-text h5 { margin: 0; font-size: var(--text-body); color: var(--neutral-900); } .sync-status-text h6 { color: var(--neutral-600); font-size: var(--text-label); margin-bottom: 5px; font-weight: 400; } .sync-status-icon span{ border-radius: 8px; background:rgba(5, 81, 96, 0.10); height: 32px; width: 32px; display: flex; align-items: center; justify-content: center; } .view-more button { background: transparent; border: 0; } /* ======================================================== Components: Risk Table 
======================================================== */
.risk-title-cell {
  position: relative;
  padding-left: 40px !important;
  cursor: pointer;
}
.chevron-icon {
  position: absolute;
  left: 16px;
  top: 50%;
  -webkit-transform: translateY(-50%);
  -ms-transform: translateY(-50%);
  transform: translateY(-50%);
  font-size: var(--text-label-small);
  color: var(--neutral-600);
  -webkit-transition: -webkit-transform 0.2s ease;
  transition: -webkit-transform 0.2s ease;
  -o-transition: transform 0.2s ease;
  transition: transform 0.2s ease;
  transition: transform 0.2s ease, -webkit-transform 0.2s ease;
}
.risk-row.expanded .chevron-icon {
  -webkit-transform: translateY(-50%) rotate(90deg);
  -ms-transform: translateY(-50%) rotate(90deg);
  transform: translateY(-50%) rotate(90deg);
}
.expandable-content {
  display: none;
  background-color: #f5f5f5;
}
.expandable-content.show {
  display: table-row;
}
.expandable-content td {
  padding-left: 20px;
}
.risk-table-container .table tr th {
  font-weight: 500;
  font-size: var(--text-label);
}
.risk-table-container .table th:first-child,
.risk-table-container .table td:first-child {
  width: auto !important;
}
.risk-table-container .table tr th:nth-child(3),
.risk-table-container .table tr th:nth-child(4) {
  text-align: center;
}
.impacted-count {
  text-align: center;
}
.description {
  background: var(--neutral-200);
  padding-top: 12px;
  padding-bottom: 12px;
}
.description-section {
  margin-bottom: 10px;
}
.risk-title {
  color: var(--neutral-900);
}
.section-label {
  font-size: var(--text-label-small);
  font-weight: 400;
  color: var(--neutral-600);
}
.section-content {
  font-size: var(--text-label);
}
.impacted-resources-content {
  font-size: var(--text-label);
}
.severity-badge {
  /* Fix: was var(--colors-red-800), which is not defined anywhere in this file
     (the :root palette defines --red-800), so the declaration was invalid at
     computed-value time and the badge inherited its color instead. */
  color: var(--red-800);
  font-size: var(--text-label);
  font-weight: 500;
  padding: 3px 12px;
  border-radius: var(--rounded-md);
}
.severity-high {
  background-color: var(--red-100);
  color: var(--red-800);
}
.severity-medium {
  background: var(--yellow-100);
  color: var(--yellow-850);
}
.severity-low { background: var(--blue-100); color: var(--blue-800); } .btn { border-radius: 6px; line-height: 40px; padding: 0 16px; font-weight: 500; -webkit-transition: 0.4s; -o-transition: 0.4s; transition: 0.4s; } .btn-primary { background: var(--primary-800); color: var(--white); } .btn-outline-primary { color: var(--primary-800); } .btn-light { background: var(--neutral-100); border: 1px solid var(--neutral-100); color: var(--neutral-600); } .btn-outline-primary, .btn-primary { border: 1px solid var(--primary-800); } .btn-sm { font-size: var(--text-label); font-weight: 500; line-height: 34px; } .btn-primary:hover { background: var(--primary-950); border-color: var(--primary-950); } .btn-primary:hover svg:not(.filter-icon) path { fill: var(--white); } .btn-outline-primary:focus svg:not path, .btn-outline-primary:active svg:not path, .btn-outline-primary:hover svg:not path { fill: var(--white); } .btn-outline-primary:hover svg.filter-icon path { stroke: var(--white); } .btn-outline-primary:active, .btn-outline-primary:hover { background: var(--primary-800) !important; border-color: var(--primary-800) !important; color: var(--white) !important; } .btn-outline-primary:active svg path, .btn-outline-primary:hover svg path { fill: white; } .btn-light:hover { background: var(--neutral-200); border-color: var(--neutral-200); } .dropdown-toggle::after { border: 0; display: none; } .dropdown-toggle { display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-align: center; -ms-flex-align: center; align-items: center; -webkit-box-pack: center; -ms-flex-pack: center; justify-content: center; gap: 8px; padding-left: 16px; padding-right: 16px; } .btn-sm.dropdown-toggle { padding-left: 12px; padding-right: 12px; } .btn:focus-visible, .btn.show:focus-visible, .btn:first-child:active:focus-visible, :not(.btn-check) + .btn:active:focus-visible { -webkit-box-shadow: none !important; box-shadow: none !important; } .btn-primary, .btn-outline-primary { /* 
--bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); --bs-btn-focus-shadow-rgb: 13, 110, 253; --bs-btn-disabled-bg: transparent; --bs-btn-active-color: var(--neutral-white); --bs-btn-hover-color: var(--neutral-white); --bs-gradient: none; */ } .btn-outline-primary { --bs-btn-color: var(--primary-800); --bs-btn-border-color: var(--primary-800); --bs-btn-hover-color: var(--white); --bs-btn-hover-bg: var(--primary-800); --bs-btn-hover-border-color: var(--primary-800); --bs-btn-active-color: var(--white); --bs-btn-active-bg: var(--primary-800); --bs-btn-active-border-color: var(--primary-800); --bs-btn-disabled-color: var(--neutral-400); --bs-btn-disabled-border-color: var(--neutral-300); --bs-btn-focus-shadow-rgb: 17, 94, 89; } .btn-outline-primary.show, .btn-check:checked + .btn-outline-primary, .btn-check:active + .btn-outline-primary, .btn-outline-primary:focus, .btn-outline-primary:focus-visible { background: var(--primary-800) !important; border-color: var(--primary-800) !important; color: var(--white) !important; } .btn-outline-primary.show svg.filter-icon path, .btn-check:checked + .btn-outline-primary svg.filter-icon path, .btn-check:active + .btn-outline-primary svg.filter-icon path, .btn-outline-primary:focus svg.filter-icon path, .btn-outline-primary:focus-visible svg.filter-icon path { stroke: var(--white); } .btn-primary.disabled, .btn-primary:disabled { border-color: var(--neutral-400); background: var(--neutral-400); color: var(--white); } .btn-outline-primary.disabled, .btn-outline-primary:disabled { border-color: var(--neutral-300); color: var(--neutral-400); } /* ======================================================== Components: Filter Toggles ======================================================== */ .toggle-switch { position: relative; width: 44px; height: 24px; } .toggle-switch input[type="checkbox"] { opacity: 0; width: 0; height: 0; } .toggle-slider { position: absolute; cursor: pointer; top: 0; left: 0; right: 0; bottom: 0; 
background-color: var(--neutral-200);
  -webkit-transition: 0.3s ease;
  -o-transition: 0.3s ease;
  transition: 0.3s ease;
  border-radius: var(--rounded-md);
}
/* Merged: this selector was declared twice back-to-back with the identical
   background-image; the single rule below preserves the cascade union of the
   two original rules. */
.toggle-slider:before {
  position: absolute;
  content: "";
  height: 20px;
  width: 20px;
  left: 2px;
  bottom: 2px;
  background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20' fill='none'%3E%3Ccircle cx='10' cy='10' r='10' fill='%23E5EBEB'/%3E%3Cpath d='M6.5 6.5L13.5 13.5M13.5 6.5L6.5 13.5' stroke='%234B6361' stroke-width='1.6' stroke-linecap='round'/%3E%3C/svg%3E");
  background-repeat: no-repeat;
  background-position: center;
  background-size: 20px 20px;
}
input:checked + .toggle-slider {
  background-color: var(--primary-800);
}
input:checked + .toggle-slider:before {
  -webkit-transform: translateX(20px);
  -ms-transform: translateX(20px);
  transform: translateX(20px);
  background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20' fill='none'%3E%3Ccircle cx='10' cy='10' r='10' fill='%230D948B'/%3E%3Cpath d='M5.5 10.5L8.5 13.5L14.5 7.5' stroke='white' stroke-width='1.8' stroke-linecap='round' stroke-linejoin='round'/%3E%3C/svg%3E");
  background-repeat: no-repeat;
  background-position: center;
  background-size: 20px 20px;
}
/* ========================================================
   Components: Tables
======================================================== */
/* Fix: :not() takes a selector argument, not a string. The quoted
   "#assessmentsTable" made the whole selector invalid, so browsers dropped
   the rule and table-layout/width were never applied to any table. */
table:not(#assessmentsTable) {
  table-layout: fixed;
  width: 100%;
}
.table > :not(caption) > * > * {
  color: var(--neutral-900);
  font-size: var(--text-body);
  padding-top: 16px;
  padding-bottom: 16px;
  border-color: var(--neutral-200);
}
tr:last-child td {
  border-bottom: 0;
}
.risk-table-container { overflow-x: auto; -webkit-overflow-scrolling: touch; } .risk-table-container table { min-width: 420px; } .risk-table-container::-webkit-scrollbar { height: 20px; } .risk-table-container::-webkit-scrollbar-track { background: var(--white); } .risk-table-container::-webkit-scrollbar-thumb { background: var(--neutral-300); border-radius: 20px; } .risk-table-container::-webkit-scrollbar-thumb:hover { background: var(--neutral-600); } th:first-child, td:first-child { width: 50px !important; font-weight: 500; } td .number, td button { margin-left: auto; margin-right: auto; } /* ======================================================== Layout: Main Content And Forms ======================================================== */ #main-content { height: 100%; width: 100%; overflow: hidden; padding: 20px 60px 20px 60px; transition: all 0.3s linear; -webkit-transition: all 0.3s linear; } .visit span { font-size: 14px; word-break: break-all; } .form-control, .form-select { color: var(--neutral-600); font-size: var(--text-body); font-style: normal; font-weight: 400; border-radius: 16px; border: 1px solid var(--neutral-300); background: var(--white); -webkit-transition: border-color 0.3s ease, box-shadow 0.3s ease; -o-transition: border-color 0.3s ease, box-shadow 0.3s ease; transition: border-color 0.3s ease, box-shadow 0.3s ease; -webkit-box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05); box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05); } .form-control:focus, .form-select:focus { border-color: var(--primary-800); box-shadow: 0 0 0 0.2rem rgba(17, 94, 89, 0.12); } .custom-search { min-width: 240px; height: 36px; border-radius: 8px; background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='20' height='20' viewBox='0 0 20 20' fill='none'%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M9 3.5C5.96243 3.5 3.5 5.96243 3.5 9C3.5 12.0376 5.96243 14.5 9 14.5C10.519 14.5 11.893 13.8852 12.8891 12.8891C13.8852 11.893 14.5 10.519 14.5 9C14.5 
5.96243 12.0376 3.5 9 3.5ZM2 9C2 5.13401 5.13401 2 9 2C12.866 2 16 5.13401 16 9C16 10.6625 15.4197 12.1906 14.4517 13.3911L17.7803 16.7197C18.0732 17.0126 18.0732 17.4874 17.7803 17.7803C17.4874 18.0732 17.0126 18.0732 16.7197 17.7803L13.3911 14.4517C12.1906 15.4197 10.6625 16 9 16C5.13401 16 2 12.866 2 9Z' fill='%236B807F'/%3E%3C/svg%3E") !important; background-repeat: no-repeat !important; background-position: 16px center !important; padding: 0 16px 0 48px !important; width: 400px; } .btn-clear:hover { text-decoration: underline; } .alt-tech-card { background: #f5f5f5; border-radius: 10px; } .alt-tech-card h3 { font-size: 18px; margin: 0; } /* ======================================================== Components: Alternative Technology Status And Hints ======================================================== */ .verified { background: var(--green-600); color: var(--white); } .green-100 { background: var(--green-100); } .green-700 { color: var(--green-700); } .tags span { background: var(--neutral-100); padding: 4px 8px; border-radius: 6px; font-size: var(--text-label); color: var(--neutral-700); font-weight: 500; } .verified span { font-size: var(--text-label); } .red-700 { color: var(--red-700); } .red-50 { background: var(--red-50); } .info-hint-box { position: relative; display: inline-block; margin-left: 6px; cursor: pointer; } .info-hint-box i { font-size: 16px; color: var(--primary-600, #007bff); transition: color 0.2s; } .info-hint-box:hover i { color: var(--primary-800, #0056b3); } .hint-hoverbox { display: none; position: absolute; top: 28px; right: 0; z-index: 1000; background: #fff; border: 1px solid var(--neutral-200, #e5e7eb); border-radius: 8px; padding: 12px 14px; width: 280px; box-shadow: 0px 4px 12px rgba(0, 0, 0, 0.1); } .info-hint-box:hover .hint-hoverbox { display: block; } .hint-hoverbox { opacity: 0; visibility: hidden; transition: all 0.2s ease-in-out; } .info-hint-box:hover .hint-hoverbox { opacity: 1; visibility: visible; } .hint-hoverbox 
h5 { font-size: 14px; font-weight: 600; margin-bottom: 6px; } .hint-hoverbox p { font-size: 13px; color: #555; margin: 0; line-height: 1.4; } /* ======================================================== Responsive ======================================================== */ @media (max-width: 991px) { .alt-tech-card div { width: 100%; /* justify-content: flex-end; */ } .alt-tech-card { -ms-flex-wrap: wrap; flex-wrap: wrap; gap: 16px; } .chart-card-head { padding-left: 16px; padding-right: 16px; } #main-content { margin-left: 0; } } @media (max-width: 767px) { main { padding-top: 24px; } .btn { padding-left: 12px; padding-right: 12px; } main { padding-top: 88px; } } @media (max-width: 575px) { } ================================================ FILE: assets/template/index.html ================================================ EscapeCloud Community Edition - Cloud Exit Assessment Report {% set provider_name = "Microsoft Azure" if cloud_service_provider == 1 else "Amazon Web Services" if cloud_service_provider == 2 else "Unknown Provider" %} {% set strategy_name = "Repatriation to On-Premises" if exit_strategy == 1 else "Migration to Alternate Cloud" if exit_strategy == 3 else "Unknown Strategy" %} {% set assessment_name = "Basic" if assessment_type == 1 else "Standard" if assessment_type == 2 else "Unknown" %} {% set total_risks = high_risk_count + medium_risk_count + low_risk_count %}
{% if cloud_service_provider == 1 %} {% elif cloud_service_provider == 2 %} {% else %} {{ provider_name }} {% endif %}
Cloud Service Provider
{{ provider_name }}
Exit Strategy
{{ strategy_name }}
Assessment Type
{{ assessment_name }}
Timestamp
{{ timestamp }}

Risks

{{ total_risks }}
{% if total_risks > 0 %}
{% else %}

No risk data available.

{% endif %}

Costs (last 6 months)

{% if total_cost > 0 %}{{ currency_symbol }}{{ total_cost }}{% else %}-{% endif %}
{% if total_cost > 0 %}
{% else %}

No cost data available.

{% endif %}
{% if assessment_type == 2 %}

Exit Readiness Score

Exit Score

This gauge chart represents the EscapeCloud Platform's exit score methodology, based on risk assessment results and the alternative technology landscape.

It uses a benchmark developed by our experts to quantify the challenges and limitations of exiting the cloud:
- Complex (0 - 20)
- Challenging (20 - 40)
- Manageable (40 - 60)
- Smooth Transition (60 - 80)
- Seamless (80 - 100)

{% if scoring_data %}
{% else %}

No exit score data available.

{% endif %}

Vendor Lock-In Score

Vendor Lock-In Score

The radar chart visualizes alternative technologies across three dimensions:
- Human (skills availability)
- Technology (maturity and vendor stability)
- Operational (ecosystem and support services)

Only where viable alternatives exist.

{% if scoring_data %}
{% else %}

No vendor lock-in score data available.

{% endif %}
{% endif %}

Risks

{% if risks %} {% for risk in risks %} {% endfor %} {% else %} {% endif %}
# Risk Impacted Resources Severity
{{ loop.index }} {{ risk.name }} {% if risk.impacted_resources_count is none %}-{% else %}{{ risk.impacted_resources_count }}{% endif %} {{ risk.severity | capitalize }}
{{ risk.description }}
{% if risk.impacted_resources_count %}
{{ risk.impacted_resources | join(", ") }}
{% endif %}
No risks were identified for this assessment.

Resource Inventory ({{ total_resources }})

{% if resource_inventory %} {% for resource in resource_inventory %}
{{ resource.name | trim }}

{{ resource.name | trim }}

{{ resource.count }} Resource{% if resource.count != 1 %}s{% endif %} Available
{% endfor %} {% else %}

No resources were discovered during the assessment.

{% endif %}
{% if resource_inventory|length > 8 %}
{% endif %}

Alternative Technologies ({{ alternative_technologies | length }})

{% if alternative_technologies %}
{% endif %}
{% if alternative_technologies %} {% for alt_tech in alternative_technologies %} {% set alt_resource = namespace(name="Resource Type " ~ alt_tech.resource_type_id) %} {% for resource in resource_inventory %} {% if resource.resource_type|string == alt_tech.resource_type_id|string %} {% set alt_resource.name = resource.name %} {% endif %} {% endfor %}
Category: {{ alt_resource.name }}

{{ alt_tech.product_name }}

{{ alt_tech.product_description }}

{% if alt_tech.open_source %} {% else %} {% endif %} Open Source
{% if alt_tech.support_plan %} {% else %} {% endif %} Enterprise Support
{% if alt_tech.product_url %} Visit {{ alt_tech.product_name }} {% endif %}
{% endfor %} {% else %}

No alternative technologies are available for this assessment.

{% endif %}
{% if total_risks > 0 %} {% endif %} {% if total_cost > 0 %} {% endif %} {% if scoring_data %} {% endif %}
================================================ FILE: config/aws_example.json ================================================ { "cloudServiceProvider": 2, "exitStrategy": 3, "assessmentType": 1, "providerDetails":{ "accessKey":"AKIAXASFHMTLKD6YQLHA", "secretKey":"", "region":"eu-central-1" } } ================================================ FILE: config/azure_example.json ================================================ { "cloudServiceProvider": 1, "exitStrategy": 3, "assessmentType": 1, "providerDetails":{ "clientId":"57344955-1579-4058-8604-5bb8724002de", "clientSecret":"", "tenantId":"38997009-9dad-42b2-b187-53f1cb71560e", "subscriptionId":"", "resourceGroupName":"" } } ================================================ FILE: config.py ================================================ # config.py """ Configuration for integrating the local 'cloudexit' tool with the ExitCloud Platform (exitcloud.io). This enables assessment extension and secure result storage in your selected data region. HOST: EU → "eu.exitcloud.io" US → "us.exitcloud.io" KEY: To generate a key: 1. Log in to your regional portal (https://eu.exitcloud.io or https://us.exitcloud.io). 2. Click your user profile (top right corner). 3. Select 'Keys' from the menu. 4. Click 'New Key' and copy the provided key. Please do not modify CLI_VERSION; it is used for debugging purposes. 
""" CLI_VERSION = "v1.0.0" HOST = "" KEY = "" ================================================ FILE: core/__init__.py ================================================ ================================================ FILE: core/engine.py ================================================ # core/engine.py import logging import os import boto3 from datetime import datetime from typing import Any, Dict, Optional, Tuple from azure.identity import ClientSecretCredential from azure.mgmt.resource import ResourceManagementClient from azure.core.exceptions import ClientAuthenticationError from azure.mgmt.authorization import AuthorizationManagementClient from botocore.exceptions import NoCredentialsError from .utils import copy_assets from .utils_aws import build_aws_resource_inventory, build_aws_cost_inventory from .utils_azure import build_azure_resource_inventory, build_azure_cost_inventory from .utils_db import connect, load_data from .utils_report import ( generate_html_report, generate_pdf_report, generate_json_report, ) from .utils_sync import post_assessment # Configure the logger logger = logging.getLogger("core.engine") # Stage 1 def verify_credentials( cloud_service_provider: int, provider_details: Dict[str, Any] ) -> Tuple[bool, str]: connection_success = False logs = "" if cloud_service_provider == 1: # Azure try: # Use DefaultAzureCredential if provided, else use client secrets credential = provider_details.get("credential") or ClientSecretCredential( tenant_id=provider_details["tenantId"], client_id=provider_details["clientId"], client_secret=provider_details["clientSecret"], ) resource_client = ResourceManagementClient( credential, provider_details["subscriptionId"] ) list( resource_client.resource_groups.list() ) # Benign call to verify credentials connection_success = True logs = "Azure connection successful." 
except ClientAuthenticationError as e: logs = f"Azure credentials validation failed: {str(e)}" # logger.error(logs) except Exception as e: logs = f"Azure connection test failed: {str(e)}" # logger.error(logs) elif cloud_service_provider == 2: # AWS try: client = boto3.client( "ec2", aws_access_key_id=provider_details["accessKey"], aws_secret_access_key=provider_details["secretKey"], region_name=provider_details["region"], ) client.describe_regions() # Benign call to verify credentials connection_success = True logs = "AWS connection successful." except NoCredentialsError as e: logs = f"AWS credentials validation failed: {str(e)}" # logger.error(logs) except Exception as e: logs = f"AWS connection test failed: {str(e)}" # logger.error(logs) return connection_success, logs # Stage 2 def test_permissions( cloud_service_provider: int, provider_details: Dict[str, Any] ) -> Tuple[bool, bool, bool, str]: permission_valid = False permission_reader = False permission_cost = False logs = "" if cloud_service_provider == 1: # Azure try: # Use DefaultAzureCredential if provided, else use client secrets credential = provider_details.get("credential") or ClientSecretCredential( tenant_id=provider_details["tenantId"], client_id=provider_details["clientId"], client_secret=provider_details["clientSecret"], ) resource_group_scope = f"/subscriptions/{provider_details['subscriptionId']}/resourceGroups/{provider_details['resourceGroupName']}" # Check role assignments auth_client = AuthorizationManagementClient( credential, provider_details["subscriptionId"] ) role_assignments = auth_client.role_assignments.list_for_scope( scope=resource_group_scope ) for role_assignment in role_assignments: role_definition_id = role_assignment.role_definition_id if role_definition_id.endswith( "acdd72a7-3385-48ef-bd42-f606fba81ae7" ): # Reader role permission_reader = True if role_definition_id.endswith( "72fafb9e-0641-4937-9268-a91bfd8191a3" ): # Cost Management Reader permission_cost = True if 
permission_reader and permission_cost: permission_valid = True logs = "Reader and Cost Management Reader roles validated." elif permission_reader: logs = "Reader role validated, but Cost Management Reader role validation failed." elif permission_cost: logs = "Cost Management Reader role validated, but Reader role validation failed." else: logs = "Both Reader and Cost Management Reader roles validation failed." except ClientAuthenticationError as e: logs = f"Azure credentials validation failed: {str(e)}" logger.error(logs) except Exception as e: logs = f"Azure permission test failed: {str(e)}" logger.error(logs) elif cloud_service_provider == 2: # AWS try: sts_client = boto3.client( "sts", aws_access_key_id=provider_details["accessKey"], aws_secret_access_key=provider_details["secretKey"], region_name=provider_details["region"], ) identity = sts_client.get_caller_identity() user_arn = identity["Arn"] user_name = user_arn.split("/")[-1] iam_client = boto3.client( "iam", aws_access_key_id=provider_details["accessKey"], aws_secret_access_key=provider_details["secretKey"], region_name=provider_details["region"], ) policies = iam_client.list_attached_user_policies(UserName=user_name) policy_names = [ policy["PolicyName"] for policy in policies["AttachedPolicies"] ] permission_reader = "ViewOnlyAccess" in policy_names permission_cost = "AWSBillingReadOnlyAccess" in policy_names if permission_reader and permission_cost: permission_valid = True logs = "ViewOnlyAccess and AWSBillingReadOnlyAccess policies validated." elif permission_reader: logs = "ViewOnlyAccess policy validated, but AWSBillingReadOnlyAccess policy validation failed." elif permission_cost: logs = "AWSBillingReadOnlyAccess policy validated, but ViewOnlyAccess policy validation failed." else: logs = "Both ViewOnlyAccess and AWSBillingReadOnlyAccess policy validations failed." 
# Stage 3
def create_resource_inventory(
    cloud_service_provider: int,
    provider_details: Dict[str, Any],
    report_path: str,
    raw_data_path: str,
) -> Dict[str, Any]:
    """Build the resource inventory for the selected cloud provider.

    Copies the static assets and seed database into the report folder first,
    then delegates to the provider-specific builder (1 = Azure, 2 = AWS).
    Returns {"success": bool, "logs": str}; builder errors are captured, not
    raised.
    """
    # Copy assets and datasets folders data.
    copy_assets(report_path)
    try:
        builder = {
            1: build_azure_resource_inventory,  # Azure
            2: build_aws_resource_inventory,  # AWS
        }.get(cloud_service_provider)
        if builder is not None:
            builder(
                cloud_service_provider, provider_details, report_path, raw_data_path
            )
        return {"success": True, "logs": "Resource inventory created successfully."}
    except Exception as e:
        logger.error(f"Error creating resource inventory: {str(e)}", exc_info=True)
        # Do not raise the exception here; just return the error information.
        return {"success": False, "logs": str(e)}


# Stage 4
def create_cost_inventory(
    cloud_service_provider: int,
    provider_details: Dict[str, Any],
    report_path: str,
    raw_data_path: str,
) -> Dict[str, Any]:
    """Build the cost inventory for the selected cloud provider.

    Same dispatch shape as create_resource_inventory (1 = Azure, 2 = AWS);
    returns {"success": bool, "logs": str} instead of raising.
    """
    try:
        builder = {
            1: build_azure_cost_inventory,  # Azure
            2: build_aws_cost_inventory,  # AWS
        }.get(cloud_service_provider)
        if builder is not None:
            builder(
                cloud_service_provider, provider_details, report_path, raw_data_path
            )
        return {"success": True, "logs": "Cost inventory created successfully."}
    except Exception as e:
        logger.error(f"Error creating cost inventory: {str(e)}", exc_info=True)
        return {"success": False, "logs": str(e)}
"online": False, "payload": None, "logs": "offline – sync skipped.", } result = post_assessment( name=name, started_at=started_at, report_path=report_path, meta=metadata, token=token, ) if not result.get("success"): raise RuntimeError(f"Assessment sync failed: {result.get('logs')}") logger.debug(result) try: payload = result["payload"].get("data", {}) server_risks = payload.get("risk_inventory", []) rows = [] for entry in server_risks: rid = entry["id"] impacted = entry.get("impacted_resources", []) if impacted: for rt in impacted: rows.append((rt, rid)) else: rows.append(("null", rid)) db_path = os.path.join(report_path, "data", "assessment.db") with connect(db_path=db_path) as conn: cursor = conn.cursor() cursor.executemany( """ INSERT INTO risk_inventory (resource_type, risk) VALUES (?, ?) """, rows, ) conn.commit() except Exception as e: logger.error("Error saving server risks to local DB: %s", str(e), exc_info=True) raise RuntimeError(f"Failed to store server risks: {str(e)}") try: scoring = payload.get("scoring_data") if scoring: db_path = os.path.join(report_path, "data", "assessment.db") with connect(db_path=db_path) as conn: cursor = conn.cursor() cursor.execute( """ INSERT INTO scoring_data (exit_score, human_score, technology_score, operational_score) VALUES (?, ?, ?, ?) 
""", ( int(scoring["exit_score"]), int(scoring["human_score"]), int(scoring["technology_score"]), int(scoring["operational_score"]), ), ) conn.commit() logger.debug("Scoring data saved to local DB.") except Exception as e: logger.error("Error saving scoring data to local DB: %s", str(e), exc_info=True) raise RuntimeError(f"Failed to store scoring data: {str(e)}") return result # Stage 5 - Offline def perform_risk_assessment( exit_strategy: int, report_path: str, mode: str ) -> Dict[str, Any]: if mode != "offline": logger.debug("Online mode – skipping local risk assessment.") return {"success": True, "logs": "online mode – local risk skipped."} try: # Define the database path db_path = os.path.join(report_path, "data", "assessment.db") # Load data from the database resource_inventory = load_data("resource_inventory", db_path=db_path) alternatives = load_data("alternative", db_path=db_path) alternative_technologies = load_data("alternativetechnology", db_path=db_path) # Initialize risk inventory risk_inventory = [] # Calculate the total count of resources across all types total_resource_count = sum(item["count"] for item in resource_inventory) # Calculate total number of distinct resource types distinct_resource_types = set( item["resource_type"] for item in resource_inventory ) total_resource_types = len(distinct_resource_types) # Process each resource by `resource_type` for resource_data in resource_inventory: resource_type_id = str( resource_data["resource_type"] ) # Convert to string for consistent comparison # Filter alternatives for the current resource_type and exit strategy relevant_alternatives = [ alt for alt in alternatives if str(alt["resource_type"]) == resource_type_id and str(alt["strategy_type"]) == str(exit_strategy) ] alternative_count = len(relevant_alternatives) # Count alternatives with support support_count = sum( 1 for alt in relevant_alternatives if any( tech["id"] == alt["alternative_technology"] and tech["support_plan"] == "t" for tech in 
alternative_technologies ) ) # Determine risks based on criteria, using resource_type_id in output if 1 <= alternative_count < 3: risk_inventory.append({"resource_type": resource_type_id, "risk": "1"}) if alternative_count == 0: risk_inventory.append({"resource_type": resource_type_id, "risk": "2"}) if 1 <= support_count < 3: risk_inventory.append({"resource_type": resource_type_id, "risk": "3"}) if support_count == 0: risk_inventory.append({"resource_type": resource_type_id, "risk": "4"}) # Check for risks based on total resource count across all types if 15 < total_resource_count <= 30: risk_inventory.append({"resource_type": "null", "risk": "5"}) elif total_resource_count > 30: risk_inventory.append({"resource_type": "null", "risk": "6"}) # Check for risks based on total number of resource types if 15 < total_resource_types <= 30: risk_inventory.append({"resource_type": "null", "risk": "7"}) elif total_resource_types > 30: risk_inventory.append({"resource_type": "null", "risk": "8"}) # Insert risk inventory into the database with connect(db_path=db_path) as conn: cursor = conn.cursor() cursor.executemany( """ INSERT INTO risk_inventory (resource_type, risk) VALUES (?, ?) 
""", [(entry["resource_type"], entry["risk"]) for entry in risk_inventory], ) conn.commit() return {"success": True, "logs": "Risk assessment completed successfully."} except Exception as e: logger.error(f"Error performing risk assessment: {str(e)}", exc_info=True) return {"success": False, "logs": str(e)} # Stage 6 def generate_report( cloud_service_provider: int, provider_details: Dict[str, Any], exit_strategy: int, assessment_type: int, name: str, report_path: str, raw_data_path: str, ) -> Dict[str, Any]: try: db_path = os.path.join(report_path, "data", "assessment.db") # Load data resource_type_mapping = { str(item["id"]): item for item in load_data("resourcetype", db_path=db_path) } risk_definitions = load_data("risk", db_path=db_path) alternatives = load_data("alternative", db_path=db_path) alternative_technologies = load_data("alternativetechnology", db_path=db_path) resource_inventory = load_data("resource_inventory", db_path=db_path) cost_data = load_data("cost_inventory", db_path=db_path) risk_data = load_data("risk_inventory", db_path=db_path) scoring_data = load_data("scoring_data", db_path=db_path) # Timestamp timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC") metadata = { "name": name, "cloud_service_provider": cloud_service_provider, "exit_strategy": exit_strategy, "assessment_type": assessment_type, "timestamp": timestamp, } # Handle scoring_data if isinstance(scoring_data, list): if len(scoring_data) == 1: scoring_data = scoring_data[0] elif len(scoring_data) == 0: scoring_data = None else: logger.warning( "Unexpected multiple rows in scoring_data: %d", len(scoring_data) ) scoring_data = scoring_data[0] # Generate Outputs reports = {} # Generate HTML report reports["HTML"] = generate_html_report( report_path, metadata, resource_type_mapping, resource_inventory, cost_data, scoring_data, risk_data, risk_definitions, alternatives, alternative_technologies, exit_strategy, ) # Generate PDF report reports["PDF"] = generate_pdf_report( 
def copy_assets(report_path: str) -> None:
    """Seed a report directory with static assets and a fresh assessment DB.

    Mirrors ./assets/{css,icons,img} into <report_path>/assets and copies
    ./datasets/data.db to <report_path>/data/assessment.db. Any destination
    that already exists is left untouched, so repeated runs never clobber
    earlier output.
    """
    target_assets = os.path.join(report_path, "assets")
    os.makedirs(target_assets, exist_ok=True)

    # Mirror each static asset folder unless a previous run already copied it.
    for name in ("css", "icons", "img"):
        destination = os.path.join(target_assets, name)
        if os.path.exists(destination):
            continue
        shutil.copytree(os.path.join("assets", name), destination, dirs_exist_ok=True)

    # Seed the per-report SQLite database from the bundled template DB.
    data_dir = os.path.join(report_path, "data")
    os.makedirs(data_dir, exist_ok=True)
    assessment_db = os.path.join(data_dir, "assessment.db")
    if not os.path.exists(assessment_db):
        shutil.copyfile("datasets/data.db", assessment_db)
date, datetime from collections import defaultdict from dateutil.relativedelta import relativedelta from botocore.exceptions import NoCredentialsError, ClientError from .utils_db import connect, load_data logger = logging.getLogger("core.engine.aws") def aws_api_call_with_retry( client: Any, function_name: str, parameters: Dict[str, Any], max_retries: int, retry_delay: int, ) -> Callable[..., Any]: def api_call(*args, **kwargs): for attempt in range(max_retries): try: function_to_call = getattr(client, function_name) if parameters: return function_to_call(**parameters, **kwargs) else: return function_to_call(**kwargs) except botocore.exceptions.ClientError as error: error_code = error.response["Error"]["Code"] # logger.warning(f"ClientError: {error_code}. Attempt {attempt + 1} of {max_retries}. Retrying in {retry_delay} seconds.") if error_code in ["Throttling", "RequestLimitExceeded"]: time.sleep(retry_delay * (2**attempt)) continue else: raise except botocore.exceptions.BotoCoreError: # logger.warning(f"BotoCoreError. Attempt {attempt + 1} of {max_retries}. 
def convert_datetime(obj: Any) -> Any:
    """Recursively replace datetime/date values with ISO-8601 strings.

    Dicts and lists are converted *in place* (and also returned) so the large
    raw API payloads this is applied to are not duplicated; scalar values are
    returned converted. Generalized to accept plain ``date`` values as well
    as ``datetime`` (``datetime`` is a subclass of ``date``, so a single
    check covers both) — either way the result is JSON-serializable.

    :param obj: arbitrary nested structure of dicts/lists/scalars.
    :return: the same structure with date/datetime leaves as ISO strings.
    """
    if isinstance(obj, dict):
        for key, value in obj.items():
            obj[key] = convert_datetime(value)
    elif isinstance(obj, list):
        for index, item in enumerate(obj):
            obj[index] = convert_datetime(item)
    elif isinstance(obj, date):
        # Covers datetime too; isoformat() yields "YYYY-MM-DD[THH:MM:SS...]".
        return obj.isoformat()
    return obj
Skipping.") continue # Extract service name, operation name, and result key service_name, operation_name, result_key = parts[1], parts[2], parts[3] # logger.info(f"Processing service {service_name} with operation {operation_name}") try: client = session.client(service_name, region_name=region) if not hasattr(client, operation_name): # logger.error(f"Operation {operation_name} does not exist for service {service_name}") continue # Make the API call api_call = aws_api_call_with_retry( client, operation_name, {}, max_retries=3, retry_delay=2 ) response = api_call() if isinstance(response, dict): response.pop("ResponseMetadata", None) resources = response.get(result_key.strip(), []) # Handle paginated results while "NextToken" in response: next_token = response["NextToken"] response = api_call(NextToken=next_token) response.pop("ResponseMetadata", None) resources.extend(response.get(result_key.strip(), [])) else: # logger.warning(f"No valid response found for {service_name} operation {operation_name}. 
Skipping.") continue # Aggregate the resources for resource in resources: aggregated_resources[(resource_type_code, region)] += 1 # Store raw data raw_data.append( { "service": service_name, "operation": operation_name, "resources": resources, } ) except (NoCredentialsError, ClientError, Exception): # logger.error(f"Error while processing {service_name}", exc_info=True) continue # Save raw data to a JSON file raw_data = convert_datetime(raw_data) raw_file_path = os.path.join(raw_data_path, "resource_inventory_raw_data.json") with open(raw_file_path, "w", encoding="utf-8") as raw_file: json.dump(raw_data, raw_file, indent=4) # Insert aggregated data into SQLite with connect(db_path=db_path) as conn: cursor = conn.cursor() for ( resource_type_code, resource_location, ), resource_count in aggregated_resources.items(): try: # Map resource type code to resource_type_id resource_info = resource_type_mapping.get(resource_type_code) if not resource_info: # logger.warning(f"Resource type {resource_type_code} not found in resourcetype mapping. Skipping.") continue resource_type_id = resource_info["id"] cursor.execute( """ INSERT INTO resource_inventory (resource_type, location, count) VALUES (?, ?, ?) 
def get_missing_months_aws(processed_costs: Set[str], max_months: int) -> List[date]:
    """Return first-of-month dates (newest first) absent from processed_costs.

    :param processed_costs: ISO "YYYY-MM-DD" strings of months already stored.
    :param max_months: size of the trailing window, anchored at the current
        UTC month.
    :return: first-of-month dates in the window that are not yet processed,
        ordered current month first.

    Fixes: uses a timezone-aware clock instead of the deprecated
    ``datetime.utcnow()`` and plain stdlib month arithmetic instead of
    ``dateutil.relativedelta`` (behavior unchanged).
    """
    from datetime import timezone  # local: the file header imports only date/datetime

    today_utc = datetime.now(timezone.utc).date()
    # Index the current month on a flat month axis (year*12 + month-1) so
    # stepping back N months is plain integer arithmetic.
    anchor = today_utc.year * 12 + (today_utc.month - 1)

    processed_months = {
        datetime.strptime(month_str, "%Y-%m-%d").date().replace(day=1)
        for month_str in processed_costs
    }

    missing_months: List[date] = []
    for offset in range(max_months):
        index = anchor - offset
        candidate = date(index // 12, index % 12 + 1, 1)
        if candidate not in processed_months:
            missing_months.append(candidate)
    return missing_months
json.dump(cost_and_usage, raw_file, indent=4) # Insert structured data into SQLite with connect(db_path=db_path) as conn: cursor = conn.cursor() for result in cost_and_usage["ResultsByTime"]: month_str = result["TimePeriod"]["Start"] total_cost = sum( float(group["Metrics"]["UnblendedCost"]["Amount"]) for group in result["Groups"] ) currency = ( result["Groups"][0]["Metrics"]["UnblendedCost"]["Unit"] if result["Groups"] else "USD" ) month_date = ( datetime.strptime(month_str, "%Y-%m-%d") .date() .replace(day=1) .isoformat() ) # Insert or update the cost data for the month cursor.execute( """ INSERT INTO cost_inventory (month, cost, currency) VALUES (?, ?, ?) ON CONFLICT(month) DO UPDATE SET cost = excluded.cost, currency = excluded.currency """, (month_date, total_cost, currency), ) # Handle missing months structured_months = { datetime.strptime(result["TimePeriod"]["Start"], "%Y-%m-%d").date() for result in cost_and_usage["ResultsByTime"] } missing_months = get_missing_months_aws( {month.isoformat() for month in structured_months}, 6 ) for missing_month in missing_months: cursor.execute( """ INSERT INTO cost_inventory (month, cost, currency) VALUES (?, 0.00, ?) 
def is_resource_inventory_empty(
    credential: Any, subscription_id: str, resource_group_name: str
) -> bool:
    """Return True when the given Azure resource group holds no resources.

    Lists the resource group's resources via the Resource Management API.
    AzureError is logged and re-raised so callers can abort the run.
    """
    try:
        client = ResourceManagementClient(credential, subscription_id)
        found = list(client.resources.list_by_resource_group(resource_group_name))
        return len(found) == 0
    except AzureError as e:
        logger.error(
            f"Error checking Azure resource inventory: {str(e)}", exc_info=True
        )
        raise
def build_azure_resource_inventory(
    cloud_service_provider: int,
    provider_details: Dict[str, Any],
    report_path: str,
    raw_data_path: str,
) -> None:
    """Fetch all resources in the configured Azure resource group, dump them as
    raw JSON under ``raw_data_path`` and upsert aggregated per-(type, location)
    counts into the assessment SQLite database.

    ``cloud_service_provider`` is unused here but kept for signature parity with
    the other inventory builders. Errors are logged and swallowed (not raised),
    matching the original behavior.
    """
    try:
        # Use DefaultAzureCredential if provided, otherwise fall back to
        # ClientSecretCredential built from the config values.
        credential = provider_details.get("credential") or ClientSecretCredential(
            tenant_id=provider_details["tenantId"],
            client_id=provider_details["clientId"],
            client_secret=provider_details["clientSecret"],
        )
        subscription_id = provider_details["subscriptionId"]
        resource_group_name = provider_details["resourceGroupName"]
        db_path = os.path.join(report_path, "data", "assessment.db")

        # Nothing to inventory if the resource group is empty.
        if is_resource_inventory_empty(
            credential, subscription_id, resource_group_name
        ):
            logger.warning(
                "The selected resource group does not contain any resources."
            )
            return

        resource_client = ResourceManagementClient(credential, subscription_id)

        # Fetch resources and serialize to raw JSON
        resources = list(
            resource_client.resources.list_by_resource_group(resource_group_name)
        )
        raw_data = [resource.serialize(True) for resource in resources]

        # Save raw data to a JSON file
        raw_file_path = os.path.join(raw_data_path, "resource_inventory_raw_data.json")
        with open(raw_file_path, "w", encoding="utf-8") as raw_file:
            json.dump(raw_data, raw_file, indent=4)

        # Load the Azure (csp == 1) resource-type mapping from the assessment
        # database. FIX: the previous cache was a single global mapping, so a
        # later run against a *different* assessment DB silently reused the
        # first DB's mapping. The cache is now keyed by db_path.
        cache = getattr(build_azure_resource_inventory, "_resource_type_cache", None)
        if cache is None:
            cache = {}
            build_azure_resource_inventory._resource_type_cache = cache
        resource_type_mapping = cache.get(db_path)
        if resource_type_mapping is None:
            resource_type_mapping = {
                item["code"].strip().lower(): {"id": item["id"], "name": item["name"]}
                for item in load_data("resourcetype", db_path=db_path)
                if item["csp"] == 1 and item["status"] == "t"
            }
            cache[db_path] = resource_type_mapping

        # Aggregate resources by (type, location); types not present in the
        # mapping are dropped below.
        aggregated_resources = defaultdict(int)
        for resource in resources:
            resource_type_code = resource.type.strip().lower()
            resource_location = resource.location.strip().lower()
            aggregated_resources[(resource_type_code, resource_location)] += 1

        # Insert data into SQLite
        with connect(db_path=db_path) as conn:
            cursor = conn.cursor()
            data_to_insert = [
                (
                    resource_type_mapping[resource_type_code]["id"],
                    resource_location,
                    resource_count,
                )
                for (
                    resource_type_code,
                    resource_location,
                ), resource_count in aggregated_resources.items()
                if resource_type_code in resource_type_mapping
            ]
            cursor.executemany(
                """
                INSERT INTO resource_inventory (resource_type, location, count)
                VALUES (?, ?, ?)
                ON CONFLICT(resource_type, location) DO UPDATE SET count = excluded.count
                """,
                data_to_insert,
            )
            conn.commit()
    except ClientAuthenticationError as e:
        logger.error(f"Azure authentication error: {str(e)}", exc_info=True)
    except sqlite3.Error as e:
        logger.error(f"SQLite error: {str(e)}", exc_info=True)
    except Exception as e:
        logger.error(f"Error fetching Azure resources: {str(e)}", exc_info=True)
def get_missing_months_azure(processed_costs: Set[str], months_back: int) -> Set[date]:
    """Return first-of-month dates within the last ``months_back`` months
    (current month included) that have no entry in ``processed_costs``.

    ``processed_costs`` holds month strings either as full timestamps
    (``YYYY-MM-DDTHH:MM:SS``) or bare dates (``YYYY-MM-DD``); each is
    normalised to the first day of its month before comparison.
    """

    def _to_month(value: str) -> date:
        # Prefer the timestamp form, falling back to the date-only form.
        try:
            parsed = datetime.strptime(value, "%Y-%m-%dT%H:%M:%S")
        except ValueError:
            parsed = datetime.strptime(value, "%Y-%m-%d")
        return parsed.date().replace(day=1)

    today = date.today()
    # Count months as a flat integer index (year*12 + month) so the window
    # is plain integer arithmetic.
    newest = today.year * 12 + (today.month - 1)
    expected = {
        date(index // 12, index % 12 + 1, 1)
        for index in range(newest - months_back + 1, newest + 1)
    }
    seen = {_to_month(value) for value in processed_costs}
    return expected - seen
def build_azure_cost_inventory(
    cloud_service_provider: int,
    provider_details: Dict[str, Any],
    report_path: str,
    raw_data_path: str,
) -> None:
    """Query Azure Cost Management for the last six months of usage, dump the
    raw response under ``raw_data_path`` and upsert monthly totals into the
    assessment SQLite database, back-filling missing months with 0.00.

    Raises on any error after logging it (callers treat cost inventory as
    mandatory). ``cloud_service_provider`` is unused but kept for signature
    parity with the other builders.
    """
    try:
        # Use DefaultAzureCredential if provided, otherwise fall back to
        # ClientSecretCredential built from the config values.
        credential = provider_details.get("credential") or ClientSecretCredential(
            tenant_id=provider_details["tenantId"],
            client_id=provider_details["clientId"],
            client_secret=provider_details["clientSecret"],
        )
        cost_management_client = CostManagementClient(
            credential, base_url="https://management.azure.com"
        )
        db_path = os.path.join(report_path, "data", "assessment.db")

        end_time = date.today()
        months_back = 6
        start_time = end_time.replace(day=1) - relativedelta(months=months_back - 1)

        query = QueryDefinition(
            type="Usage",
            timeframe=TimeframeType.CUSTOM,
            time_period={
                "from": start_time.strftime("%Y-%m-%dT00:00:00Z"),
                "to": end_time.strftime("%Y-%m-%dT00:00:00Z"),
            },
            dataset={
                "granularity": "Monthly",
                "aggregation": {"totalCost": {"name": "Cost", "function": "Sum"}},
            },
        )
        cost_data = cost_management_client.query.usage(
            f'/subscriptions/{provider_details["subscriptionId"]}/resourceGroups/{provider_details["resourceGroupName"]}',
            query,
        )

        # Save the raw API response for auditing.
        cost_inventory_raw_path = os.path.join(
            raw_data_path, "cost_inventory_raw_data.json"
        )
        with open(cost_inventory_raw_path, "w", encoding="utf-8") as raw_file:
            json.dump(cost_data.as_dict(), raw_file, indent=4)

        # Insert structured cost data into SQLite
        with connect(db_path=db_path) as conn:
            cursor = conn.cursor()
            # FIX: default the currency so that an empty result set (no rows at
            # all) does not leave `currency` unbound and crash the zero-cost
            # back-fill below with a NameError.
            currency = "USD"
            for row in cost_data.rows:
                cost, month_str, currency = row
                month_date = (
                    datetime.strptime(month_str, "%Y-%m-%dT%H:%M:%S")
                    .date()
                    .replace(day=1)
                    .isoformat()
                )
                # Insert or update cost data
                cursor.execute(
                    """
                    INSERT INTO cost_inventory (month, cost, currency)
                    VALUES (?, ?, ?)
                    ON CONFLICT(month) DO UPDATE SET cost = excluded.cost, currency = excluded.currency
                    """,
                    (month_date, cost, currency),
                )

            # Extract months already in the cost data
            structured_months = {
                datetime.strptime(row[1], "%Y-%m-%dT%H:%M:%S").date()
                for row in cost_data.rows
            }
            # Identify missing months and insert them with zero cost so the
            # report always shows a full window.
            missing_months = get_missing_months_azure(
                {month.isoformat() for month in structured_months}, months_back
            )
            for missing_month in missing_months:
                cursor.execute(
                    """
                    INSERT INTO cost_inventory (month, cost, currency)
                    VALUES (?, 0.00, ?)
                    ON CONFLICT(month) DO UPDATE SET currency = excluded.currency
                    """,
                    (missing_month.isoformat(), currency),
                )
            conn.commit()
    except sqlite3.Error as e:
        logger.error(f"SQLite error: {str(e)}", exc_info=True)
    except Exception as e:
        logger.error(f"Error creating Azure cost inventory: {str(e)}", exc_info=True)
        raise
# core/utils_db.py

import sqlite3
import logging
from contextlib import closing

# Configure logger for database operations
logger = logging.getLogger("core.engine.db")
logger.setLevel(logging.INFO)

# Default master database
MASTER_DATABASE = "datasets/data.db"


def connect(db_path=MASTER_DATABASE):
    """Open and return a sqlite3 connection to ``db_path``.

    Callers own the connection and must close it (or use it as a context
    manager). Logs and re-raises connection errors.
    """
    try:
        return sqlite3.connect(db_path)
    except sqlite3.Error as e:
        logger.error(f"Error connecting to database: {e}")
        raise


def load_data(table_name, db_path=MASTER_DATABASE):
    """Return every row of ``table_name`` as a list of column->value dicts.

    NOTE: the table name is interpolated into the SQL (placeholders cannot
    name tables), so only trusted, internal table names may be passed.
    """
    try:
        # FIX: close the connection even when the query raises (the previous
        # version leaked the connection on any error path).
        with closing(connect(db_path)) as conn:
            cursor = conn.cursor()
            cursor.execute(f"SELECT * FROM {table_name}")
            columns = [desc[0] for desc in cursor.description]
            rows = cursor.fetchall()
        return [dict(zip(columns, row)) for row in rows]
    except sqlite3.Error as e:
        logger.error(f"Error loading data from table '{table_name}': {e}")
        raise


def execute_query(query, params=None, db_path=MASTER_DATABASE):
    """Execute a write query, commit, and return the affected row count."""
    try:
        with closing(connect(db_path)) as conn:
            cursor = conn.cursor()
            cursor.execute(query, params or ())
            conn.commit()
            return cursor.rowcount
    except sqlite3.Error as e:
        logger.error(f"Error executing query: {e}")
        raise


def fetch_one(query, params=None, db_path=MASTER_DATABASE):
    """Return the first result row as a column->value dict, or None."""
    try:
        with closing(connect(db_path)) as conn:
            cursor = conn.cursor()
            cursor.execute(query, params or ())
            row = cursor.fetchone()
            columns = [desc[0] for desc in cursor.description]
        return dict(zip(columns, row)) if row else None
    except sqlite3.Error as e:
        logger.error(f"Error fetching data: {e}")
        raise
def anonymize_string(s: str, num_visible: int = 4) -> str:
    """Mask the middle of a secret, keeping ``num_visible`` chars on each end.

    Strings too short to keep both ends are fully masked; non-string input
    yields the placeholder "N/A".
    """
    if not isinstance(s, str):
        return "N/A"
    length = len(s)
    hidden = length - 2 * num_visible
    if hidden <= 0:
        return "*" * length
    return s[:num_visible] + "*" * hidden + s[-num_visible:]
def generate_html_report(
    report_path: str,
    metadata: Dict[str, Any],
    resource_type_mapping: Dict[str, Dict[str, Any]],
    resource_inventory: List[Dict[str, Any]],
    cost_data: List[Dict[str, Any]],
    scoring_data: Optional[Dict[str, Any]],
    risk_data: List[Dict[str, Any]],
    risk_definitions: List[Dict[str, Any]],
    alternatives: List[Dict[str, Any]],
    alternative_technologies: List[Dict[str, Any]],
    exit_strategy: int,
) -> str:
    """Render the HTML assessment report and write it to <report_path>/index.html.

    Transforms the inventories through the utils_report_html helpers, fills
    the Jinja2 template at assets/template/index.html (resolved relative to
    the current working directory), and returns the path of the written file.
    """
    # Index the inventory by resource-type id (stringified) and attach the
    # display name and icon from the mapping table.
    resource_inventory_dict = {
        str(item["resource_type"]): {
            **item,
            "name": resource_type_mapping.get(str(item["resource_type"]), {}).get(
                "name", "Unknown Resource"
            ),
            # NOTE(review): this branch defaults to "/icons/default.png" and
            # prefixes "/assets", while resource_counts below defaults to
            # "assets/icons/default.png" and lstrip()s the slash — presumably
            # both should resolve to the same asset; confirm against the
            # template before changing either.
            "icon": "/assets"
            + resource_type_mapping.get(str(item["resource_type"]), {}).get(
                "icon", "/icons/default.png"
            ),
        }
        for item in resource_inventory
    }

    # Transform risks (rows plus per-severity counts for the badges)
    risks, severity_counts = transform_risk_inventory_for_html(
        risk_data, risk_definitions, resource_inventory_dict
    )

    # Transform costs
    months, cost_values, total_cost, currency, currency_symbol = (
        transform_cost_inventory_for_html(cost_data)
    )

    # Build the per-resource rows (name + icon + count) for the inventory
    # section of the template.
    resource_counts = []
    for resource_type, resource in resource_inventory_dict.items():
        count = resource.get("count", 0)
        resource_info = resource_type_mapping.get(str(resource_type), {})
        name = resource_info.get("name", "Unknown Resource")
        icon = resource_info.get("icon", "assets/icons/default.png").lstrip("/")
        resource_counts.append(
            {"resource_type": resource_type, "name": name, "icon": icon, "count": count}
        )

    # Calculate total resources
    total_resources = sum(item["count"] for item in resource_counts)

    # Transform alternative technologies
    alternative_technologies_data = transform_alt_tech_for_html(
        resource_inventory, alternatives, alternative_technologies, exit_strategy
    )

    # Scoring context: zeros plus a falsy flag when no scoring data exists
    # (e.g. Basic assessments), so the template can hide the section.
    scoring_context = {
        "scoring_data": bool(scoring_data),
        "exit_score": scoring_data.get("exit_score", 0) if scoring_data else 0,
        "human": scoring_data.get("human_score", 0) if scoring_data else 0,
        "technology": scoring_data.get("technology_score", 0) if scoring_data else 0,
        "operational": scoring_data.get("operational_score", 0) if scoring_data else 0,
    }

    # Render the HTML template
    template_path = os.path.join("assets", "template", "index.html")
    with open(template_path, "r") as file:
        template_content = file.read()
    template = Template(template_content)
    html_content = template.render(
        **metadata,
        **scoring_context,
        risks=risks,
        high_risk_count=severity_counts["high"],
        medium_risk_count=severity_counts["medium"],
        low_risk_count=severity_counts["low"],
        total_cost=total_cost,
        months_json=json.dumps(months),
        costs_json=json.dumps(cost_values),
        currency_symbol=currency_symbol,
        total_resources=total_resources,
        resource_inventory=resource_counts,
        alternative_technologies=alternative_technologies_data,
    )

    # Save HTML report
    html_path = os.path.join(report_path, "index.html")
    with open(html_path, "w") as report_file:
        report_file.write(html_content)

    return html_path
"template", "index.html") with open(template_path, "r") as file: template_content = file.read() template = Template(template_content) html_content = template.render( **metadata, **scoring_context, risks=risks, high_risk_count=severity_counts["high"], medium_risk_count=severity_counts["medium"], low_risk_count=severity_counts["low"], total_cost=total_cost, months_json=json.dumps(months), costs_json=json.dumps(cost_values), currency_symbol=currency_symbol, total_resources=total_resources, resource_inventory=resource_counts, alternative_technologies=alternative_technologies_data, ) # Save HTML report html_path = os.path.join(report_path, "index.html") with open(html_path, "w") as report_file: report_file.write(html_content) return html_path def generate_json_report( raw_data_path: str, metadata: Dict[str, Any], resource_type_mapping: Dict[str, Dict[str, Any]], resource_inventory: List[Dict[str, Any]], cost_data: List[Dict[str, Any]], scoring_data: Optional[Dict[str, Any]], risk_data: List[Dict[str, Any]], risk_definitions: List[Dict[str, Any]], alternatives: List[Dict[str, Any]], alternative_technologies: List[Dict[str, Any]], exit_strategy: int, ) -> str: # Transform data for JSON transformed_resource_inventory = transform_resource_inventory_for_json( resource_inventory, resource_type_mapping ) transformed_cost_inventory = transform_cost_inventory_for_json(cost_data) transformed_risk_inventory = transform_risk_inventory_for_json( risk_data, risk_definitions, resource_inventory ) transformed_alt_tech = transform_alt_tech_for_json( resource_inventory, alternatives, alternative_technologies, exit_strategy ) # Build the JSON structure report_json = { "meta": metadata, "data": { "resource_inventory": transformed_resource_inventory, "cost_inventory": transformed_cost_inventory, "risk_inventory": transformed_risk_inventory, }, } # Add scoring_data only if present if scoring_data: report_json["data"]["scoring_data"] = { "exit_score": scoring_data.get("exit_score", 0), 
"human_score": scoring_data.get("human_score", 0), "technology_score": scoring_data.get("technology_score", 0), "operational_score": scoring_data.get("operational_score", 0), } # Add alternative technologies report_json["data"]["alternative_technologies"] = transformed_alt_tech # Save JSON to file json_path = os.path.join(raw_data_path, "assessment_result.json") with open(json_path, "w") as json_file: json.dump(report_json, json_file, indent=4) return json_path def generate_pdf_report( provider_details: Dict[str, Any], report_path: str, metadata: Dict[str, Any], resource_type_mapping: Dict[str, Any], resource_inventory: List[Dict[str, Any]], cost_data: List[Dict[str, Any]], scoring_data: Optional[Dict[str, Any]], risk_data: List[Dict[str, Any]], risk_definitions: List[Dict[str, Any]], alternatives: List[Dict[str, Any]], alternative_technologies: List[Dict[str, Any]], exit_strategy: int, ) -> str: # Define the PDF path pdf_path = os.path.join(report_path, "report.pdf") # Define a template for the header and footer def header_footer(canvas, doc): # Make sure draw_header_footer is defined and accessible draw_header_footer(report_path, canvas, doc) # Create a document template with the header and footer doc = SimpleDocTemplate( pdf_path, pagesize=A4, title="EscapeCloud_-_Cloud_Exit_Assessment" ) styles = getSampleStyleSheet() content_style = ParagraphStyle( "ContentStyle", fontSize=10, leading=12, spaceAfter=10 ) styles["Heading1"].leading = 1.5 * styles["Heading1"].fontSize styles["Heading1"].textColor = HexColor("#112726") styles["Heading2"].leading = 1.5 * styles["Heading2"].fontSize styles["Heading2"].textColor = HexColor("#112726") tablecontent_style = styles["BodyText"] # Define a custom padding value header_padding = 12 content = [] # --- # Page 1: Summary --- content.append(Spacer(1, header_padding)) content.append(Paragraph("Summary", styles["Heading1"])) summary_block1 = "Quick overview of the assessment:" content.append(Paragraph(summary_block1, 
content_style)) # Prepare mappings cloud_service_provider_map = { "1": "Microsoft Azure", "2": "Amazon Web Services", "3": "Alibaba Cloud", "4": "Google Cloud", } exit_strategy_map = { "1": "Repatriation to On-Premises", "2": "Hybrid Cloud Adoption", "3": "Migration to Alternate Cloud", } type_map = {"1": "Basic", "2": "Standard"} # Prepare the summary data summary_data = [ ["Name", "Value"], [ "Cloud Service Provider", cloud_service_provider_map.get( str(metadata["cloud_service_provider"]), "Unknown" ), ], [ "Exit Strategy", exit_strategy_map.get(str(metadata["exit_strategy"]), "Unknown"), ], ["Assessment Type", type_map.get(str(metadata["assessment_type"]), "Unknown")], ["TimeStamp", metadata["timestamp"]], ] # Column widths summary_colWidths = [4 * cm, 11.5 * cm] # Create the summary table summary_table = Table(summary_data, colWidths=summary_colWidths) # Define the summary table style summary_table_style = TableStyle( [ ( "BACKGROUND", (0, 0), (-1, 0), HexColor("#115e59"), ), # Header row background color ("TEXTCOLOR", (0, 0), (-1, 0), colors.white), # Header row text color ("GRID", (0, 0), (-1, -1), 1, HexColor("#000000")), # Grid lines ("ALIGN", (0, 0), (-1, -1), "LEFT"), # Left align all cells ( "VALIGN", (0, 0), (-1, -1), "MIDDLE", ), # Middle vertical alignment for all cells ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"), # Bold font for header row ("FONTSIZE", (0, 0), (-1, 0), 11), # Font size for header row ("BOTTOMPADDING", (0, 0), (-1, 0), 12), # Padding for header row ("TOPPADDING", (0, 0), (-1, 0), 12), # Padding for header row ] ) summary_table.setStyle(summary_table_style) # Add summary to content content.append(summary_table) content.append(Spacer(1, 12)) # --- Page 1: Scope of Assessment --- content.append(Paragraph("Scope of Assessment", styles["Heading2"])) scope_block1 = "Defined scope of assessment:" content.append(Paragraph(scope_block1, content_style)) # Prepare the scope data scope_data = [["Name", "Value"]] if 
metadata["cloud_service_provider"] == 1: # Azure scope_data.extend( [ ["Tenant ID", provider_details.get("tenantId", "N/A")], ["Client ID", provider_details.get("clientId", "N/A")], [ "Client Secret", anonymize_string(provider_details.get("clientSecret", "N/A")), ], ["Subscription ID", provider_details.get("subscriptionId", "N/A")], [ "Resource Group Name", provider_details.get("resourceGroupName", "N/A"), ], ] ) elif metadata["cloud_service_provider"] == 2: # AWS scope_data.extend( [ ["Access Key", provider_details.get("accessKey", "N/A")], [ "Secret Key", anonymize_string(provider_details.get("secretKey", "N/A")), ], ["Region", provider_details.get("region", "N/A")], ] ) else: scope_data.append(["N/A", "N/A"]) # Column widths scope_colWidths = [4 * cm, 11.5 * cm] # Create the scope table scope_table = Table(scope_data, colWidths=scope_colWidths) # Define the scope table style scope_table_style = TableStyle( [ ( "BACKGROUND", (0, 0), (-1, 0), HexColor("#115e59"), ), # Header row background color ("TEXTCOLOR", (0, 0), (-1, 0), colors.white), # Header row text color ("GRID", (0, 0), (-1, -1), 1, HexColor("#000000")), # Grid lines ("ALIGN", (0, 0), (-1, -1), "LEFT"), # Left align all cells ( "VALIGN", (0, 0), (-1, -1), "MIDDLE", ), # Middle vertical alignment for all cells ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"), # Bold font for header row ("FONTSIZE", (0, 0), (-1, 0), 11), # Font size for header row ("BOTTOMPADDING", (0, 0), (-1, 0), 12), # Padding for header row ("TOPPADDING", (0, 0), (-1, 0), 12), # Padding for header row ] ) scope_table.setStyle(scope_table_style) # Add scope to content content.append(scope_table) content.append(Spacer(1, 12)) # --- # Page 1: Costs --- content.append(Paragraph("Costs", styles["Heading2"])) # costs_block1 = "Overview of the costs for the last 6 months:" # content.append(Paragraph(costs_block1, content_style)) costs_block2 = "Examining the costs reveals the financial impact of the transition, allowing for more informed 
decision-making and strategic planning." costs_paragraph = Paragraph(costs_block2, tablecontent_style) # Transform the cost data for the PDF months, costs, currency_symbol = transform_cost_inventory_for_pdf(cost_data) # Draw the cost chart cost_chart = draw_cost_chart(months, costs) # Create the data structure for the table costcharts_table_data = [ [costs_paragraph, "", "", cost_chart, "", ""], # Row 1: Paragraph and Chart months, # Row 2: Months [f"{currency_symbol} {cost:.2f}" for cost in costs], # Row 3: Costs ] # Create the table with 6 columns costcharts_table = Table( costcharts_table_data, colWidths=[2.58333333333 * cm] * 6 # Equal width columns ) # Define the table style costcharts_table_style = TableStyle( [ # Merge cells for Row 1 ("SPAN", (0, 0), (2, 0)), # Merge columns 1, 2, and 3 for the paragraph ("SPAN", (3, 0), (5, 0)), # Merge columns 4, 5, and 6 for the chart # Align the merged cell (Row 1, Column 1-2-3) to top-left ("VALIGN", (0, 0), (2, 0), "TOP"), # Align vertically to top ("ALIGN", (0, 0), (2, 0), "LEFT"), # Align horizontally to left # Remove padding for the merged cell in Row 1, Columns 1-2-3 ("LEFTPADDING", (0, 0), (2, 0), 0), ("RIGHTPADDING", (0, 0), (2, 0), 0), ("TOPPADDING", (0, 0), (2, 0), 0), ("BOTTOMPADDING", (0, 0), (2, 0), 0), # Background and text color for Row 2 (months) ( "BACKGROUND", (0, 1), (-1, 1), HexColor("#115e59"), ), # Row 2 background color ("TEXTCOLOR", (0, 1), (-1, 1), colors.white), # Row 2 text color ("FONTNAME", (0, 1), (-1, 1), "Helvetica-Bold"), # Bold font for Row 2 # Center alignment for Row 2 (months) ("ALIGN", (0, 1), (-1, 1), "CENTER"), # Center align -> Row 2 text # Font and alignment for Row 3 (costs) ("FONTNAME", (0, 2), (-1, 2), "Helvetica"), # Regular font for Row 3 ("ALIGN", (0, 2), (-1, 2), "CENTER"), # Center align -> Row 3 text # Grid lines for Row 2 and Row 3 ( "GRID", (0, 1), (-1, 2), 1, colors.black, ), # Grid for months and costs rows # Center alignment and vertical alignment for all cells 
("VALIGN", (0, 0), (-1, -1), "MIDDLE"), # Vertical alignment for all cells ( "VALIGN", (0, 0), (2, 0), "TOP", ), # Align vertically to top for the merged cell ] ) # Apply the table style costcharts_table.setStyle(costcharts_table_style) # Add the table to your content content.append(costcharts_table) content.append(PageBreak()) # Page 2: Risks content.append(Spacer(1, header_padding)) content.append(Paragraph("Risk Assessment", styles["Heading1"])) risk_block1 = "The Risk Assessment provides a thorough evaluation of potential risks associated with the cloud resources utilized in the project and the alternative technologies available in the market:" content.append(Paragraph(risk_block1, content_style)) content.append(Spacer(1, 12)) # Transform the risk data for the PDF and get severity counts risks, severity_counts = transform_risk_inventory_for_pdf( risk_data, risk_definitions, resource_inventory ) # severity_counts is a dict like: {'high': X, 'medium': Y, 'low': Z} risk_chart_data = { "high": severity_counts["high"], "medium": severity_counts["medium"], "low": severity_counts["low"], } risk_chart = draw_risk_chart(risk_chart_data) content.append(risk_chart) content.append(Spacer(1, 12)) # Sort risks by severity severity_order = {"high": 1, "medium": 2, "low": 3} risks.sort(key=lambda r: severity_order[r["severity"]]) # Define the path to severity icons severity_icon_map = { "high": (os.path.join(report_path, "assets/icons/severity/high.png"), 22.5, 12), "medium": ( os.path.join(report_path, "assets/icons/severity/medium.png"), 39, 12, ), "low": (os.path.join(report_path, "assets/icons/severity/low.png"), 20.5, 12), } # Build the risk table data risk_table_data = [["#", "Risk name", "Impacted", "Severity"]] for i, risk in enumerate(risks): impacted_str = ( str(risk["impacted_resources_count"]) if risk["impacted_resources_count"] > 0 else "-" ) # Get the severity level and corresponding icon details severity_level = risk["severity"].lower() icon_details = 
severity_icon_map.get(severity_level, None) if icon_details: icon_path, icon_width, icon_height = icon_details if os.path.exists(icon_path): severity_icon = Image(icon_path, width=icon_width, height=icon_height) else: severity_icon = Paragraph("N/A", tablecontent_style) else: severity_icon = Paragraph("N/A", tablecontent_style) risk_table_data.append([str(i + 1), risk["name"], impacted_str, severity_icon]) # Add the total risks row total_risks = len(risks) risk_table_data.append(["Total Risks", "", "", str(total_risks)]) # Define column widths for the risk table risk_table_colWidths = [0.5 * cm, 10 * cm, 3 * cm, 2 * cm] risk_table = Table(risk_table_data, colWidths=risk_table_colWidths) risk_table_style_commands = [ ("BACKGROUND", (0, 0), (-1, 0), HexColor("#115e59")), # Header row background ("TEXTCOLOR", (0, 0), (-1, 0), colors.white), # Header text color ("BACKGROUND", (0, -1), (-1, -1), HexColor("#115e59")), # Last row background ("TEXTCOLOR", (0, -1), (-1, -1), colors.white), # Last row text color ("BOX", (0, 0), (-1, -1), 1, HexColor("#112726")), ("BOTTOMPADDING", (0, 0), (-1, 0), 12), # Padding for header row ("TOPPADDING", (0, 0), (-1, 0), 12), # Remove SPAN if not needed # ('SPAN', (-4, -1), (-2, -1)), ("ALIGN", (0, 1), (0, -2), "LEFT"), ("VALIGN", (0, 1), (0, -2), "MIDDLE"), ("ALIGN", (1, 1), (1, -2), "LEFT"), ("VALIGN", (1, 1), (1, -2), "MIDDLE"), ("ALIGN", (2, 1), (2, -2), "CENTER"), ("VALIGN", (2, 1), (2, -2), "MIDDLE"), ("ALIGN", (3, 1), (3, -2), "CENTER"), ("VALIGN", (3, 1), (3, -2), "MIDDLE"), ("ALIGN", (-1, 0), (-1, 0), "CENTER"), ("VALIGN", (-1, 0), (-1, 0), "MIDDLE"), ("ALIGN", (-1, -1), (-1, -1), "CENTER"), ("VALIGN", (-1, -1), (-1, -1), "MIDDLE"), ] risk_table.setStyle(TableStyle(risk_table_style_commands)) content.append(risk_table) content.append(PageBreak()) # Page 3: EscapeCloud Scoring if metadata.get("assessment_type") == 2: content.append(Spacer(1, header_padding)) content.append(Paragraph("EscapeCloud Scoring", styles["Heading1"])) 
content.append(Paragraph("Scoring #1 - Exit Score", styles["Heading2"])) scoring_block1 = "The following gauge chart visualizes a combined score that reflects both risk assessment results and the evaluation of alternative technologies:" content.append(Paragraph(scoring_block1, content_style)) content.append(Spacer(1, 12)) exit_score = scoring_data.get("exit_score", 0) if scoring_data else 0 # Define output path for charts chart_output_path = os.path.join(report_path, "assets/charts") os.makedirs(chart_output_path, exist_ok=True) exit_score_image_path = draw_exitscore_chart( exit_score, chart_output_path, width=750, height=500 ) # Define the table data exitscore_table_data = [ ["", ""], ["Complex (0 - 20)", ""], ["Challenging (20 - 40)", ""], ["Manageable (40 - 60)", ""], ["Smooth Transition (60 - 80)", ""], ["Seamless (80 - 100)", ""], ] exitscore_table_data[1][1] = Image( exit_score_image_path, width=7.5 * cm, height=5 * cm ) # Column widhts exitscore_colWidths = [5 * cm, 10.5 * cm] # Create the table exitscore_table = Table(exitscore_table_data, colWidths=exitscore_colWidths) # Style the table exitscore_table_style = TableStyle( [ ("SPAN", (0, 0), (1, 0)), ("BACKGROUND", (0, 0), (1, 0), HexColor("#115e59")), ("TEXTCOLOR", (0, 0), (1, 0), colors.white), ("FONTNAME", (0, 0), (1, 0), "Helvetica-Bold"), ("ALIGN", (0, 0), (1, 0), "CENTER"), ("VALIGN", (0, 0), (1, 0), "MIDDLE"), ("SPAN", (1, 1), (1, 5)), ("GRID", (0, 0), (-1, -1), 1, colors.black), ("ALIGN", (0, 1), (0, 5), "LEFT"), ("VALIGN", (0, 1), (0, 5), "MIDDLE"), ("ALIGN", (1, 1), (1, 1), "CENTER"), ("VALIGN", (1, 1), (1, 1), "MIDDLE"), ] ) exitscore_table.setStyle(exitscore_table_style) content.append(exitscore_table) content.append(Spacer(1, 12)) content.append( Paragraph("Scoring #2 - Vendor Lock-In Score", styles["Heading2"]) ) scoring_block2 = "The following radar chart visualizes the assessment of alternative technologies across three dimensions: Human (skills availability), Technology (maturity and vendor 
stability), and Operational (ecosystem and support services) — only where viable alternatives exist:" content.append(Paragraph(scoring_block2, content_style)) content.append(Spacer(1, 12)) human_score = scoring_data.get("human_score", 0) if scoring_data else 0 technology_score = ( scoring_data.get("technology_score", 0) if scoring_data else 0 ) operational_score = ( scoring_data.get("operational_score", 0) if scoring_data else 0 ) vendor_lockin_chart = draw_vendor_lockin_radar_chart( human_score, technology_score, operational_score ) content.append(vendor_lockin_chart) # Define the table data vendor_lockin_table_data = [ ["Human", "Technology", "Operational"], [human_score, technology_score, operational_score], ] # Column widhts vendor_lockin_colWidths = [5 * cm, 5 * cm, 5 * cm] # Create the table vendor_lockin_table = Table( vendor_lockin_table_data, colWidths=vendor_lockin_colWidths ) # Style the table vendor_lockin_table_style = TableStyle( [ ("BACKGROUND", (0, 0), (-1, 0), HexColor("#115e59")), ("TEXTCOLOR", (0, 0), (-1, 0), colors.white), ("ALIGN", (0, 0), (-1, 0), "CENTER"), ("VALIGN", (0, 0), (-1, 0), "MIDDLE"), ("ALIGN", (0, 0), (-1, -1), "CENTER"), ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"), ("GRID", (0, 0), (-1, -1), 1, colors.black), ] ) vendor_lockin_table.setStyle(vendor_lockin_table_style) content.append(vendor_lockin_table) content.append(PageBreak()) # Page 4: Resource Inventory content.append(Spacer(1, header_padding)) content.append(Paragraph("Resource Inventory", styles["Heading1"])) res_block1 = "The Resource Inventory provides a summary of the cloud resources provisioned within the defined scope:" content.append(Paragraph(res_block1, content_style)) content.append(Spacer(1, 12)) # Transform the resource inventory data for the PDF resources = transform_resource_inventory_for_pdf( resource_inventory, resource_type_mapping, report_path ) # Compute total resources total_resources = sum(res["count"] for res in resources) # Build the table data 
resource_data = [["#", "Resource type", "", "No."]] for res in resources: resource_data.append( [ str(res["id"]), res["resource_name"], Image(res["icon_url"], width=20, height=20), str(res["count"]), ] ) # Add the total resources row resource_data.append(["Total Resources", "", "", str(total_resources)]) res_colWidths = [1 * cm, 11.5 * cm, 1.5 * cm, 1.5 * cm] res_table = Table(resource_data, colWidths=res_colWidths) res_table_style_commands = [ ( "BACKGROUND", (0, 0), (-1, 0), HexColor("#115e59"), ), # Header row background color ("TEXTCOLOR", (0, 0), (-1, 0), colors.white), # Header row text color ( "BACKGROUND", (0, -1), (-1, -1), HexColor("#115e59"), ), # Last row background color ("TEXTCOLOR", (0, -1), (-1, -1), colors.white), # Last row text color ("BOX", (0, 0), (-1, -1), 1, HexColor("#112726")), ("BOTTOMPADDING", (0, 0), (-1, 0), 12), # Padding for header row ("TOPPADDING", (0, 0), (-1, 0), 12), # Padding for header row # If you previously had a SPAN on the last row, remove if not needed now. 
# ('SPAN', (-4, -1), (-2, -1)), # remove if not required ("ALIGN", (0, 1), (0, -2), "LEFT"), # Aligning the '#' column ("VALIGN", (0, 1), (0, -2), "MIDDLE"), ("ALIGN", (1, 1), (1, -2), "LEFT"), # Resource name column ("VALIGN", (1, 1), (1, -2), "MIDDLE"), ("ALIGN", (2, 1), (2, -2), "CENTER"), # Icon column ("VALIGN", (2, 1), (2, -2), "MIDDLE"), ("ALIGN", (3, 1), (3, -2), "CENTER"), # Number column ("VALIGN", (3, 1), (3, -2), "MIDDLE"), ("ALIGN", (-1, 0), (-1, 0), "CENTER"), ("VALIGN", (-1, 0), (-1, 0), "MIDDLE"), ("ALIGN", (-1, -1), (-1, -1), "CENTER"), ("VALIGN", (-1, -1), (-1, -1), "MIDDLE"), ] res_table_style = TableStyle(res_table_style_commands) res_table.setStyle(res_table_style) content.append(res_table) content.append(PageBreak()) # Page 5: Alternative Technologies content.append(Spacer(1, header_padding)) content.append(Paragraph("Alternative Technologies", styles["Heading1"])) alttech_block = ( "The Alternative Technology provides a summary of the alternative technology landscape " "for each identified resource in the Resource Inventory, based on our dataset and market research. 
" "It also includes a count of the available alternative technologies for each resource:" ) content.append(Paragraph(alttech_block, content_style)) content.append(Spacer(1, 12)) # Transform the alternative technologies data for the PDF alttech = transform_alt_tech_for_pdf( resource_inventory, resource_type_mapping, alternatives, alternative_technologies, exit_strategy, report_path, ) # Build the table data alttech_data = [["#", "Resource type", "", "No."]] for res in alttech: alttech_data.append( [ str(res["id"]), res["resource_name"], Image(res["icon_url"], width=20, height=20) if res["icon_url"] else "", str(res["count"]), ] ) # Define the column widths alttech_colWidths = [1 * cm, 11.5 * cm, 1.5 * cm, 1.5 * cm] # Create and style the alternative technology table alttech_table = Table(alttech_data, colWidths=alttech_colWidths) alttech_table_style_commands = [ ( "BACKGROUND", (0, 0), (-1, 0), HexColor("#115e59"), ), # Header row background color ("TEXTCOLOR", (0, 0), (-1, 0), colors.white), # Header row text color ("BOX", (0, 0), (-1, -1), 1, HexColor("#000000")), # Draw box around the table ( "BOTTOMPADDING", (0, 1), (-1, -1), 6, ), # Apply bottom padding to all rows except the header ( "TOPPADDING", (0, 1), (-1, -1), 6, ), # Apply top padding to all rows except the header ( "ALIGN", (2, 0), (2, -1), "CENTER", ), # Center align the text in the icon column ("VALIGN", (2, 0), (2, -1), "MIDDLE"), ("ALIGN", (0, 1), (0, -1), "LEFT"), ("VALIGN", (0, 1), (0, -1), "MIDDLE"), ("ALIGN", (1, 1), (1, -1), "LEFT"), ("VALIGN", (1, 1), (1, -1), "MIDDLE"), ("ALIGN", (2, 1), (2, -1), "CENTER"), ("VALIGN", (2, 1), (2, -1), "MIDDLE"), ("ALIGN", (3, 1), (3, -1), "CENTER"), ("VALIGN", (3, 1), (3, -1), "MIDDLE"), ("ALIGN", (-1, 0), (-1, 0), "CENTER"), # Center align the "No." 
# ===== core/utils_report_common.py ==========================================
# Shared, renderer-agnostic report transforms used by the HTML/JSON/PDF
# report generators.

from collections import defaultdict
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

# ISO currency codes mapped to display symbols; unknown codes are rendered
# as the code itself (see summarize_costs).
CURRENCY_SYMBOLS = {
    "USD": "$",
    "GBP": "£",
    "EUR": "€",
}


def sort_cost_data(cost_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Return the cost entries ordered chronologically by their 'month' field."""
    return sorted(
        cost_data, key=lambda entry: datetime.strptime(entry["month"], "%Y-%m-%d")
    )


def summarize_costs(
    cost_data: List[Dict[str, Any]], *, last_n: Optional[int] = None
) -> Tuple[List[str], List[float], float, str, str]:
    """Summarize monthly cost rows for charting.

    Returns a tuple of (month labels, cost values, rounded total,
    currency code, currency symbol). When *last_n* is given, only the
    most recent *last_n* months are included. The currency is taken from
    the first (oldest) remaining row, defaulting to USD.
    """
    ordered = sort_cost_data(cost_data)
    if last_n is not None:
        ordered = ordered[-last_n:]

    labels: List[str] = []
    amounts: List[float] = []
    for entry in ordered:
        labels.append(datetime.strptime(entry["month"], "%Y-%m-%d").strftime("%b"))
        amounts.append(entry["cost"])

    total = round(sum(amounts), 2)
    code = ordered[0].get("currency", "USD") if ordered else "USD"
    symbol = CURRENCY_SYMBOLS.get(code, code)
    return labels, amounts, total, code, symbol


def summarize_risks(
    risk_data: List[Dict[str, Any]],
    risk_definitions: List[Dict[str, Any]],
    *,
    resource_name_map: Optional[Dict[str, str]] = None,
    resource_id_map: Optional[Dict[str, int]] = None,
) -> Tuple[List[Dict[str, Any]], Dict[str, int]]:
    """Group raw risk rows by risk code and enrich them from the definitions.

    A row whose resource_type is None (or the string "null") marks a risk
    that applies to the whole assessment rather than to specific
    resources; such risks report impacted_resources_count as None.
    Rows referencing a risk code absent from *risk_definitions* are
    dropped. Returns (summarized risks, counts per severity).
    """
    definitions_by_id = {
        definition["id"]: definition for definition in risk_definitions
    }
    severity_counts = {"high": 0, "medium": 0, "low": 0}

    buckets = defaultdict(
        lambda: {
            "impacted_resource_types": set(),
            "impacted_resources_count": 0,
            "has_overall_risk": False,
        }
    )
    for row in risk_data:
        bucket = buckets[row["risk"]]
        rtype = row["resource_type"]
        if rtype is None or rtype == "null":
            # Assessment-wide risk: no per-resource attribution.
            bucket["has_overall_risk"] = True
            continue
        bucket["impacted_resource_types"].add(str(rtype))
        bucket["impacted_resources_count"] += 1

    summarized: List[Dict[str, Any]] = []
    for code, bucket in buckets.items():
        definition = definitions_by_id.get(code)
        if not definition:
            continue  # unknown risk code - ignore

        severity = definition["severity"]
        if severity in severity_counts:
            severity_counts[severity] += 1

        types_sorted = sorted(bucket["impacted_resource_types"])
        names = (
            None
            if resource_name_map is None
            else [
                resource_name_map.get(t, "Unknown Resource") for t in types_sorted
            ]
        )
        ids = (
            None
            if resource_id_map is None
            else [resource_id_map[t] for t in types_sorted if t in resource_id_map]
        )
        count = (
            None
            if bucket["has_overall_risk"]
            else bucket["impacted_resources_count"]
        )

        summarized.append(
            {
                "id": code,
                "name": definition["name"],
                "description": definition["description"],
                "severity": severity,
                "impacted_resource_types": types_sorted,
                "impacted_resources": names,
                "impacted_resource_ids": ids,
                "impacted_resources_count": count,
            }
        )

    return summarized, severity_counts


def summarize_alternative_technologies(
    resource_inventory: List[Dict[str, Any]],
    alternatives: List[Dict[str, Any]],
    alternative_technologies: List[Dict[str, Any]],
    exit_strategy: int,
) -> Dict[str, List[Dict[str, Any]]]:
    """Group active alternative technologies per resource type for one strategy.

    Only technologies whose status flag is 't' (active) are considered,
    and only alternatives matching *exit_strategy*. Resource types not
    present in *resource_inventory* are ignored. Keys of the returned
    dict are stringified resource-type ids.
    """
    active = {
        tech["id"]: tech
        for tech in alternative_technologies
        if tech.get("status") == "t"
    }
    grouped: Dict[str, List[Dict[str, Any]]] = {
        str(resource["resource_type"]): [] for resource in resource_inventory
    }
    for link in alternatives:
        if str(link["strategy_type"]) != str(exit_strategy):
            continue
        rtype = str(link["resource_type"])
        tech = active.get(link["alternative_technology"])
        if not tech or rtype not in grouped:
            continue
        grouped[rtype].append(
            {
                "product_name": tech["product_name"],
                "product_description": tech["product_description"],
                "product_url": tech["product_url"],
                # Flag columns are stored as 't'/'f' strings in the dataset.
                "open_source": tech["open_source"] == "t",
                "support_plan": tech["support_plan"] == "t",
                "status": tech["status"] == "t",
            }
        )
    return grouped


def enrich_resource_inventory(
    resource_inventory: List[Dict[str, Any]],
    resource_type_mapping: Dict[str, Dict[str, Any]],
    *,
    report_path: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Join inventory rows with resource-type metadata (code/name/icon).

    Rows are numbered from 1 in input order. When *report_path* is given,
    an absolute 'icon_url' is added for renderers that load icon files.
    """
    enriched: List[Dict[str, Any]] = []
    for position, resource in enumerate(resource_inventory, start=1):
        rtype = str(resource["resource_type"])
        meta = resource_type_mapping.get(rtype, {})
        icon = meta.get("icon", "/icons/default.png")
        row = {
            "id": position,
            "resource_type": rtype,
            "code": meta.get("code", "N/A"),
            "resource_name": meta.get("name", "Unknown Resource"),
            "icon": icon,
            "location": resource.get("location", "Unknown"),
            "count": resource.get("count", 0),
        }
        if report_path is not None:
            row["icon_url"] = f"{report_path}/assets{icon}"
        enriched.append(row)
    return enriched


# ===== core/utils_report_html.py ============================================
# HTML-report specific shims over the common transforms.

import logging

logger = logging.getLogger("core.engine.report_html")
logger.setLevel(logging.INFO)


def transform_cost_inventory_for_html(
    cost_data: List[Dict[str, Any]],
) -> Tuple[List[str], List[float], float, str, str]:
    """Thin wrapper: the HTML report uses the common cost summary as-is."""
    return summarize_costs(cost_data)


def transform_risk_inventory_for_html(
    risk_data: List[Dict[str, Any]],
    risk_definitions: List[Dict[str, Any]],
    resource_inventory: Dict[str, Dict[str, Any]],
) -> Tuple[List[Dict[str, Any]], Dict[str, int]]:
    """Summarize risks with display names, ordered high -> medium -> low."""
    name_map = {
        str(rtype): info["name"] for rtype, info in resource_inventory.items()
    }
    risks, severity_counts = summarize_risks(
        risk_data,
        risk_definitions,
        resource_name_map=name_map,
    )
    rank = {"high": 1, "medium": 2, "low": 3}
    risks.sort(key=lambda risk: rank.get(risk["severity"], 4))
    return risks, severity_counts


def transform_alt_tech_for_html(
    resource_inventory: List[Dict[str, Any]],
    alternatives: List[Dict[str, Any]],
    alternative_technologies: List[Dict[str, Any]],
    exit_strategy: int,
) -> List[Dict[str, Any]]:
    """Flatten the grouped alternatives into one list, tagged with the
    original (un-stringified) resource-type id for template lookups."""
    grouped = summarize_alternative_technologies(
        resource_inventory,
        alternatives,
        alternative_technologies,
        exit_strategy,
    )
    flattened: List[Dict[str, Any]] = []
    for resource in resource_inventory:
        rtype = str(resource.get("resource_type"))
        for tech in grouped.get(rtype, []):
            flattened.append(
                {
                    "resource_type_id": resource.get("resource_type"),
                    **tech,
                }
            )
    return flattened


# ===== core/utils_report_json.py ============================================
# JSON-report specific shims over the common transforms.

logger = logging.getLogger("core.engine.report_json")
logger.setLevel(logging.INFO)


def transform_resource_inventory_for_json(
    resource_inventory: List[Dict[str, Any]],
    resource_type_mapping: Dict[str, Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Project the enriched inventory down to the JSON report's fields."""
    return [
        {
            "id": row["id"],
            "code": row["code"],
            "resource_name": row["resource_name"],
            "location": row["location"],
            "count": row["count"],
        }
        for row in enrich_resource_inventory(
            resource_inventory, resource_type_mapping
        )
    ]


def transform_cost_inventory_for_json(
    cost_data: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Chronologically ordered cost rows with amounts rounded to cents."""
    return [
        {
            "month": item["month"],
            "cost": round(item["cost"], 2),
            "currency": item["currency"],
        }
        for item in sort_cost_data(cost_data)
    ]


def transform_risk_inventory_for_json(
    risk_data: List[Dict[str, Any]],
    risk_definitions: List[Dict[str, Any]],
    resource_inventory: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Summarize risks with impacted resources referenced by their 1-based
    inventory ids (matching transform_resource_inventory_for_json)."""
    id_map = {
        str(row["resource_type"]): index + 1
        for index, row in enumerate(resource_inventory)
    }
    risks, _ = summarize_risks(
        risk_data,
        risk_definitions,
        resource_id_map=id_map,
    )
    return [
        {
            "id": risk["id"],
            "name": risk["name"],
            "description": risk["description"],
            "severity": risk["severity"],
            "impacted_resources": risk["impacted_resource_ids"] or [],
            "impacted_resources_count": risk["impacted_resources_count"],
        }
        for risk in risks
    ]


def transform_alt_tech_for_json(
    resource_inventory: List[Dict[str, Any]],
    alternatives: List[Dict[str, Any]],
    alternative_technologies: List[Dict[str, Any]],
    exit_strategy: int,
) -> Dict[int, List[Dict[str, Any]]]:
    """Alternatives keyed by 1-based inventory id, sorted by that id; each
    technology gets a 1-based 'id' within its group."""
    id_map = {
        str(row["resource_type"]): index + 1
        for index, row in enumerate(resource_inventory)
    }
    grouped = summarize_alternative_technologies(
        resource_inventory,
        alternatives,
        alternative_technologies,
        exit_strategy,
    )
    by_id: Dict[int, List[Dict[str, Any]]] = {
        resource_id: [] for resource_id in id_map.values()
    }
    for rtype, technologies in grouped.items():
        resource_id = id_map.get(rtype)
        if not resource_id:
            continue
        by_id[resource_id] = [
            {"id": position + 1, **tech}
            for position, tech in enumerate(technologies)
        ]
    return {resource_id: by_id[resource_id] for resource_id in sorted(by_id)}
# core/utils_report_pdf.py
#
# PDF-specific report helpers: data transforms feeding the PDF tables plus
# ReportLab/Plotly drawing primitives (header/footer, charts).

import os
import math
import logging
from datetime import datetime
from typing import List, Dict, Any, Tuple
from math import cos, sin, radians

# ReportLab
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib import colors
from reportlab.lib.colors import HexColor
from reportlab.platypus import Paragraph, Image, Table, TableStyle
from reportlab.graphics.shapes import Drawing, Polygon, Line, String
from reportlab.graphics.charts.legends import Legend
from reportlab.graphics.charts.piecharts import Pie
from reportlab.graphics.charts.barcharts import VerticalBarChart

# Plotly
import plotly.graph_objects as go

from core.utils_report_common import (
    enrich_resource_inventory,
    summarize_alternative_technologies,
    summarize_costs,
    summarize_risks,
)

# Configure logger
logger = logging.getLogger("core.engine.report_pdf")
logger.setLevel(logging.INFO)


def transform_resource_inventory_for_pdf(
    resource_inventory: list, resource_type_mapping: Dict[str, Any], report_path: str
) -> List[Dict[str, Any]]:
    """Project the enriched inventory down to the columns the PDF resource
    table renders; 'icon_url' is an absolute path under report_path."""
    enriched_resources = enrich_resource_inventory(
        resource_inventory,
        resource_type_mapping,
        report_path=report_path,
    )
    return [
        {
            "id": resource["id"],
            "resource_name": resource["resource_name"],
            "icon_url": resource["icon_url"],
            "location": resource["location"],
            "count": resource["count"],
        }
        for resource in enriched_resources
    ]


def transform_cost_inventory_for_pdf(
    cost_data: list,
) -> Tuple[List[str], List[float], str]:
    """Return (month labels, costs, currency symbol) for the last 6 months."""
    months, costs, _, _, currency_symbol = summarize_costs(cost_data, last_n=6)
    return months, costs, currency_symbol


def transform_risk_inventory_for_pdf(
    risk_data: list, risk_definitions: list, resource_inventory: list
) -> Tuple[List[Dict[str, Any]], Dict[str, int]]:
    """Summarize risks for the PDF risk table.

    NOTE(review): resource_inventory is currently unused here (kept for
    signature parity with the HTML/JSON transforms). The `or 0` below
    collapses assessment-wide risks (whose count summarize_risks reports
    as None) to 0 for display.
    """
    risks, severity_counts = summarize_risks(risk_data, risk_definitions)
    return [
        {
            "name": risk["name"],
            "severity": risk["severity"],
            "impacted_resources_count": risk["impacted_resources_count"] or 0,
        }
        for risk in risks
    ], severity_counts


def transform_alt_tech_for_pdf(
    resource_inventory: list,
    resource_type_mapping: Dict[str, Any],
    alternatives: list,
    alternative_technologies: list,
    exit_strategy: int,
    report_path: str,
) -> List[Dict[str, Any]]:
    """Per-resource count of available alternatives (for the PDF table),
    with resolved display name and absolute icon path."""
    grouped_alt_tech = summarize_alternative_technologies(
        resource_inventory,
        alternatives,
        alternative_technologies,
        exit_strategy,
    )
    alt_tech = []
    for idx, resource in enumerate(resource_inventory):
        rtype_str = str(resource["resource_type"])
        rtype_info = resource_type_mapping.get(rtype_str, {})
        resource_name = rtype_info.get("name", "Unknown Resource")
        icon_path = "/assets" + rtype_info.get("icon", "/icons/default.png")
        icon_url = f"{report_path}{icon_path}"
        count = len(grouped_alt_tech.get(rtype_str, []))
        alt_tech.append(
            {
                "id": idx + 1,
                "resource_name": resource_name,
                "icon_url": icon_url,
                "count": count,
            }
        )
    return alt_tech


def draw_header_footer(report_path: str, canvas, doc) -> None:
    """Draw the page header (title/date/logo + rule) and footer (rule, page
    number, disclaimer) directly on the ReportLab canvas.

    Used as the onFirstPage/onLaterPages callback when building the doc;
    canvas state is saved/restored so flowables are unaffected.
    """
    # Save the state of the canvas to not affect the drawing
    canvas.saveState()
    width, height = A4

    # Include the date in the format mm-dd-yyyy
    current_date = datetime.now().strftime("%m-%d-%Y")
    left_text_content1 = "EscapeCloud Community Edition - Report"
    left_text_content2 = f"Date: {current_date}"

    # Define the header content with Paragraphs
    header_style = ParagraphStyle(
        "HeaderStyle", fontSize=10, textColor=HexColor("#9cafae")
    )
    header_data = [
        [Paragraph(left_text_content1, header_style), "", ""],
        [Paragraph(left_text_content2, header_style), "", ""],
    ]

    # Create the header table (third column reserves room for the logo)
    table = Table(
        header_data, colWidths=[width - 188 - doc.rightMargin - doc.leftMargin, 10, 150]
    )

    # Define the style for the table
    table.setStyle(
        TableStyle(
            [
                ("SPAN", (1, 0), (1, 1)),  # Merge Column 2 in both rows
                ("SPAN", (2, 0), (2, 1)),  # Merge Column 3 in both rows
                (
                    "ALIGN",
                    (0, 0),
                    (0, 0),
                    "LEFT",
                ),  # Align left_text_content1 to the left
                (
                    "ALIGN",
                    (0, 1),
                    (0, 1),
                    "LEFT",
                ),  # Align left_text_content2 to the left
                ("ALIGN", (2, 0), (2, 1), "RIGHT"),  # Align logo to the right
                ("VALIGN", (0, 0), (0, 1), "TOP"),  # Vertically align to the top
                ("VALIGN", (2, 0), (2, 1), "MIDDLE"),  # Vertically align to the middle
                # ('GRID', (0, 0), (-1, -1), 0.5, colors.red),  # Temporary borders for visualization
            ]
        )
    )

    # Build the header table and draw it on the canvas
    table.wrapOn(canvas, doc.leftMargin, height - doc.topMargin)
    table.drawOn(canvas, doc.leftMargin, height - doc.topMargin)

    # Add the logo
    logo_path = f"{report_path}/assets/img/logo/report_logo.png"
    logo = Image(logo_path, width=150, height=30)
    # Aligning logo vertically with the text
    logo_y = height - doc.topMargin + 5
    logo.drawOn(canvas, width - 150 - doc.rightMargin, logo_y)

    # Line below the header
    canvas.setStrokeColor(HexColor("#115e59"))
    canvas.setLineWidth(1)
    line_y = height - doc.topMargin - 10
    canvas.line(doc.leftMargin, line_y, width - doc.rightMargin, line_y)

    # Footer
    footer_padding = 15  # Add padding under the page number
    canvas.setStrokeColor(HexColor("#115e59"))
    canvas.line(40, 60 + footer_padding, A4[0] - 40, 60 + footer_padding)
    canvas.setFont("Helvetica", 8)
    canvas.drawString(A4[0] / 2 - 30, 60 + footer_padding - 15, f"Page {doc.page}")
    canvas.setFont("Helvetica-Oblique", 8)
    canvas.setFillColor(HexColor("#9cafae"))
    canvas.drawCentredString(
        A4[0] / 2,
        40,
        "EscapeCloud Community Edition - This report is provided 'As Is,' without any warranty of any kind.",
    )
    canvas.drawCentredString(
        A4[0] / 2,
        30,
        "EscapeCloud makes no warranty that the information contained in this report is complete or error-free. Copyright 2024-2025",
    )

    # Restore the state of the canvas
    canvas.restoreState()


def draw_risk_chart(risk_chart_data: Dict[str, int]) -> Drawing:
    """Build a doughnut chart of risk counts per severity with a two-column
    legend (severity name, count) and a separate header legend row.

    risk_chart_data maps severity ('high'/'medium'/'low') to its count;
    slice order follows the dict's iteration order.
    """
    # Define colors for each severity and their border colors
    severity_colors = {
        "high": HexColor("#991b1b"),
        "medium": HexColor("#ffae1f"),
        "low": HexColor("#539bff"),
    }

    # Border colors
    border_colors = {
        "high": HexColor("#991b1b"),
        "medium": HexColor("#ffae1f"),
        "low": HexColor("#539bff"),
    }

    # Create a drawing for the Doughnut chart
    d = Drawing(300, 200)

    # Create the Pie (Doughnut) chart
    pie = Pie()
    pie.x = 100
    pie.y = 25
    pie.width = 150
    pie.height = 150
    pie.data = list(risk_chart_data.values())
    pie.innerRadiusFraction = 0.5

    # Assign colors and borders for each severity level
    for i, severity in enumerate(risk_chart_data.keys()):
        pie.slices[i].fillColor = severity_colors[severity]
        pie.slices[i].strokeColor = border_colors[severity]
        pie.slices[i].strokeWidth = 1  # Set the border width

    # Add the Pie chart to the drawing
    d.add(pie)

    # Create a Legend with headers
    legend = Legend()
    legend.x = 280
    legend.y = 130
    legend.dxTextSpace = 10
    legend.columnMaximum = 6
    legend.alignment = "right"
    legend.subCols[0].minWidth = 60
    legend.subCols[1].minWidth = 30
    legend.colorNamePairs = [
        (severity_colors[severity], (severity, str(risk_chart_data[severity])))
        for severity in risk_chart_data.keys()
    ]

    # Configure sub-columns for the legend
    legend.subCols[0].align = "left"
    legend.subCols[1].align = "right"

    # Add the Legend to the drawing
    d.add(legend)

    # Create a Legend Header
    legend_header = Legend()
    legend_header.x = 280
    legend_header.y = 150
    legend_header.dxTextSpace = 10
    legend_header.colorNamePairs = [
        (HexColor("#FFFFFF"), ("Severity", "No."))
    ]  # Corrected line
    legend_header.alignment = "right"
    legend_header.subCols[0].align = "left"
    legend_header.subCols[0].minWidth = 60
    legend_header.subCols[1].align = "right"
    legend_header.subCols[1].minWidth = 30

    # Add the Legend Header to the drawing
    d.add(legend_header)

    return d
draw_cost_chart(months: List[str], costs: List[float]) -> Drawing: # Create a drawing for the bar chart d = Drawing(7.5 * cm, 5 * cm) # Create a Vertical Bar Chart bar_chart = VerticalBarChart() bar_chart.x = 20 bar_chart.y = 20 bar_chart.width = 6.5 * cm bar_chart.height = 4 * cm bar_chart.data = [costs] bar_chart.barWidth = 0.8 * cm # Style the bars bar_chart.bars[0].fillColor = HexColor("#055160") bar_chart.bars[0].strokeColor = HexColor("#055160") # Set the categories (months) bar_chart.categoryAxis.categoryNames = months # Calculate valueMax max_cost = max(costs) if costs else 0 bar_chart.valueAxis.valueMax = ( math.ceil(max_cost / 10.0) * 10 if max_cost > 0 else 10 ) bar_chart.valueAxis.valueMin = 0 # Add the bar chart to the drawing d.add(bar_chart) return d def draw_exitscore_chart( exit_score: int, output_path: str, width: int = 750, height: int = 500 ) -> str: # Create the gauge chart fig = go.Figure( go.Indicator( mode="gauge+number", value=exit_score, domain={"x": [0, 1], "y": [0, 1]}, gauge={ "axis": {"range": [0, 100], "tickwidth": 0.2, "tickcolor": "darkgray"}, "bar": {"color": "#f3f6f6", "thickness": 0.2}, "steps": [ {"range": [0, 20], "color": "#ba1c1d"}, {"range": [20, 40], "color": "#ff9533"}, {"range": [40, 60], "color": "#f1ca00"}, {"range": [60, 80], "color": "#76c31d"}, {"range": [80, 100], "color": "#065f43"}, ], }, ) ) image_file = os.path.join(output_path, "exit_score_chart.png") fig.write_image(image_file, width=width, height=height) return image_file def draw_vendor_lockin_radar_chart( human: int, technology: int, operational: int ) -> Drawing: # Create a drawing for the radar chart d = Drawing(350, 250) # Define the labels and data labels = ["Human", "Technology", "Operational"] data = [human, technology, operational] # Define your hex color with alpha bg_color = HexColor("#4BC0C0") bg_color.alpha = 0.2 # Set alpha for the fill color border_color = HexColor("#4BC0C0") # Border color with default alpha (1) # Normalize data max_value = 5 
normalized_data = [i / max_value for i in data] # Define the number of facets and calculate the angle of each facet num_facets = len(labels) angle = 360 / num_facets # Adjust the starting angle for pyramid orientation start_angle = -30 # Define the center and radius of the radar chart cx = 230 cy = 125 radius = 100 # Radius # Draw concentric polygons for level in range(1, int(max_value) + 1): points = [] for i in range(num_facets): x = cx + radius * cos(radians(start_angle + i * angle)) y = cy + radius * sin(radians(start_angle + i * angle)) points.extend([x, y]) d.add(Polygon(points, fillColor=None, strokeColor=colors.grey)) # Draw lines connecting the vertices of concentric polygons for level in range(1, int(max_value)): prev_x = None prev_y = None first_x = None first_y = None for i in range(num_facets): x = cx + (radius * level / max_value) * cos( radians(start_angle + i * angle) ) y = cy + (radius * level / max_value) * sin( radians(start_angle + i * angle) ) # Store the first x and y coordinates to close the triangle later if i == 0: first_x = x first_y = y # If not the first vertex, draw a line from the previous vertex to the current vertex if prev_x is not None and prev_y is not None: d.add(Line(prev_x, prev_y, x, y, strokeColor=colors.grey)) prev_x = x prev_y = y # Close the triangle by drawing a line from the last vertex to the first vertex d.add(Line(prev_x, prev_y, first_x, first_y, strokeColor=colors.grey)) # Draw the data polygon points = [] for i in range(num_facets): x = cx + radius * normalized_data[i] * cos(radians(start_angle + i * angle)) y = cy + radius * normalized_data[i] * sin(radians(start_angle + i * angle)) points.extend([x, y]) d.add(Polygon(points, fillColor=bg_color, strokeColor=border_color)) # Draw labels for i in range(num_facets): x = cx + radius * cos(radians(start_angle + i * angle)) y = cy + radius * sin(radians(start_angle + i * angle)) d.add(Line(cx, cy, x, y, strokeColor=colors.grey)) # Adjust label position and anchor based 
on quadrant anchor = "middle" # Default text anchor label_text = labels[i] # Adjust padding and anchor for different quadrants if necessary if i * angle > 90 and i * angle < 270: anchor = "end" if i * angle < 180 else "start" # Adjust text anchor label_padding = ( 10 # Additional padding to move the label slightly outward from max_value ) label_x = cx + (radius + label_padding) * cos(radians(start_angle + i * angle)) label_y = cy + (radius + label_padding) * sin(radians(start_angle + i * angle)) # Adjustments based on label if label_text == "Technology": label_x += 25 label_y -= 10 elif label_text == "Operational": label_x -= 50 label_y -= 10 elif label_text == "Human": label_y += 5 label_x += 10 d.add( String( label_x, label_y, label_text, fontSize=10, fillColor=colors.black, textAnchor=anchor, ) ) return d ================================================ FILE: core/utils_sync.py ================================================ # core/utils_sync.py from __future__ import annotations import json import logging import os import time import config import requests from typing import Any, Dict, List from core.utils_db import load_data # Configure logger logger = logging.getLogger("core.engine.sync") logger.setLevel(logging.INFO) _ASSESS_PATH = "/api/v1/assessments/" def _assess_url(host: str) -> str: host = host.strip().rstrip("/") if not host.startswith("http"): host = f"https://{host}" return f"{host}{_ASSESS_PATH}" def _build_payload( *, report_path: str, name: str, started_at: int, exit_strategy: int, cloud_service_provider: int, assessment_type: int, ) -> Dict[str, Any]: db_path = os.path.join(report_path, "data", "assessment.db") resource_rows: List[Dict[str, Any]] = load_data( "resource_inventory", db_path=db_path ) cost_rows: List[Dict[str, Any]] = load_data("cost_inventory", db_path=db_path) res_payload = [ { "id": int(r["resource_type"]), "location": r.get("location") or "unknown", "count": int(r.get("count", 0)), } for r in resource_rows ] cost_payload = [ { 
"month": c["month"], "cost": float(c["cost"]), "currency": c["currency"], } for c in cost_rows ] engine_version = getattr(config, "CLI_VERSION", "v1.0.0").strip() now = int(time.time()) payload: Dict[str, Any] = { "id": os.urandom(16).hex(), "object": "event", "cli_version": engine_version, "created": now, "type": "local.assessment.succeeded", "data": { "name": name, "exit_strategy": exit_strategy, "cloud_service_provider": cloud_service_provider, "assessmentType": assessment_type, "started_at": started_at, "completed_at": now, "success": True, "resource_inventory": res_payload, "cost_inventory": cost_payload, }, } logger.debug("Outgoing payload:\n%s", json.dumps(payload, indent=2)) return payload def post_assessment( *, name: str, started_at: int, report_path: str, meta: Dict[str, int], token: str, timeout: int = 10, ) -> Dict[str, Any]: host = getattr(config, "HOST", "").strip() if not host: return {"success": False, "payload": None, "logs": "HOST missing in config.py"} url = _assess_url(host) headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} payload = _build_payload( report_path=report_path, started_at=started_at, name=name, exit_strategy=meta["exit_strategy"], cloud_service_provider=meta["cloud_service_provider"], assessment_type=meta["assessment_type"], ) try: resp = requests.post(url, headers=headers, json=payload, timeout=timeout) ok = resp.ok return { "success": ok, "payload": resp.json() if ok else None, "logs": f"server responded {resp.status_code}", } except requests.RequestException as exc: return {"success": False, "payload": None, "logs": f"POST failed: {exc}"} ================================================ FILE: main.py ================================================ # main.py import logging import argparse import boto3 import time import sys from rich.console import Console from datetime import datetime from botocore.exceptions import NoCredentialsError, ProfileNotFound from azure.identity import 
from azure.identity import DefaultAzureCredential, ClientSecretCredential
from azure.mgmt.resource import SubscriptionClient, ResourceManagementClient

# Import the functions
from core.engine import (
    verify_credentials,
    test_permissions,
    create_resource_inventory,
    create_cost_inventory,
    perform_risk_assessment,
    sync_assessment,
    generate_report,
)
from utils.azure import (
    select_subscription,
    select_resource_group,
    is_azure_cli_installed,
    is_azure_cli_logged_in,
    is_azure_cli_token_expired,
)
from utils.aws import is_aws_cli_installed, is_aws_profile_valid
from utils.connection import resolve_mode
from utils.data import initialize_dataset
from utils.utils import (
    ascii_art,
    create_directory,
    load_config,
    prompt_required_inputs,
    print_help_message,
    print_step,
)
from utils.validate import validate_region, validate_config

# Configure the root logger to ensure logs propagate from all modules
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
# Quiet chatty third-party loggers
logging.getLogger("botocore").setLevel(logging.WARNING)
logging.getLogger("boto3").setLevel(logging.WARNING)
logging.getLogger("kaleido").setLevel(logging.WARNING)
logging.getLogger("choreographer").setLevel(logging.WARNING)

# Configure the logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Initialize the console object
console = Console()


def handle_aws(args):
    """Build an AWS assessment config and run the pipeline.

    Three credential sources, in priority order of the CLI flags:
    --config (JSON file), --profile (AWS CLI profile), else interactive
    prompts. The assessment name priority is --name > config name >
    timestamped fallback.
    """
    config = {}
    cloud_provider = 2  # provider id used by the backend (2 = AWS)

    if args.config:
        # logger.info(f"AWS --config argument detected with path: {args.config}")
        config = load_config(args.config)
        if not config:
            console.print("[red]Invalid or missing AWS configuration file.[/red]")
            return

        # Handle name field logic (priority: --name > config name > fallback)
        if args.name:
            config["name"] = args.name.strip()
        if "name" not in config or not config["name"].strip():
            config["name"] = (
                f"Exit Assessment {datetime.now().strftime('%Y%m%d_%H%M%S')}"
            )

    elif args.profile:
        # Check if aws cli available
        if not is_aws_cli_installed():
            # logger.error("AWS CLI is not installed.")
            console.print(
                "[red]AWS CLI is not installed. Install it from https://aws.amazon.com/cli/[/red]"
            )
            return

        # Check if aws cli profile is valid
        if not is_aws_profile_valid(args.profile):
            # logger.error(f"AWS profile '{args.profile}' is not configured.")
            console.print(
                f"[red]AWS profile '{args.profile}' is not configured. Use `aws configure --profile {args.profile}`.[/red]"
            )
            return

        # logger.info(f"AWS --profile argument detected with profile: {args.profile}")
        try:
            session = boto3.Session(profile_name=args.profile)
            credentials = session.get_credentials()
            if credentials is None:
                # logger.error(f"AWS profile '{args.profile}' has no valid credentials.")
                console.print(
                    f"[red]AWS profile '{args.profile}' has no valid credentials. Use `aws configure --profile {args.profile}`.[/red]"
                )
                return
            # Fall back to us-east-1 when the profile has no default region
            region = session.region_name or "us-east-1"
            # logger.info(f"Using AWS profile '{args.profile}' with region '{region}'.")
            exit_strategy, assessment_type = prompt_required_inputs()
            config = {
                "name": (
                    args.name.strip()
                    if args.name
                    else f"Exit Assessment {datetime.now().strftime('%Y%m%d_%H%M%S')}"
                ),
                "cloudServiceProvider": cloud_provider,
                "exitStrategy": exit_strategy,
                "assessmentType": assessment_type,
                "providerDetails": {
                    "accessKey": credentials.access_key,
                    "secretKey": credentials.secret_key,
                    "region": region,
                },
            }
        except (NoCredentialsError, ProfileNotFound) as e:
            # logger.error(f"AWS profile error: {e}", exc_info=True)
            console.print(
                f"[red]AWS profile error: {str(e)}. Use `aws configure` to set up a profile.[/red]"
            )
            return

    else:
        exit_strategy, assessment_type = prompt_required_inputs()
        # Prompt for manual input
        try:
            access_key = input("Enter AWS Access Key: ").strip()
            secret_key = input("Enter AWS Secret Key: ").strip()

            # Validate AWS region input (re-prompt until valid)
            while True:
                region = input("Enter AWS region: ").strip()
                try:
                    validate_region(region)
                    break
                except ValueError as e:
                    console.print(f"[red]{e} Please enter a valid AWS region.[/red]")

            config = {
                "name": (
                    args.name.strip()
                    if args.name
                    else f"Exit Assessment {datetime.now().strftime('%Y%m%d_%H%M%S')}"
                ),
                "cloudServiceProvider": cloud_provider,
                "exitStrategy": exit_strategy,
                "assessmentType": assessment_type,
                "providerDetails": {
                    "accessKey": access_key,
                    "secretKey": secret_key,
                    "region": region,
                },
            }
        except Exception as e:
            console.print(f"[red]Error during manual AWS configuration: {e}[/red]")
            return

    # Run the AWS assessment pipeline
    run_assessment(config, "aws")


def handle_azure(args):
    """Build an Azure assessment config and run the pipeline.

    Three credential sources, in priority order of the CLI flags:
    --config (JSON file), --cli (Azure CLI / DefaultAzureCredential),
    else interactive service-principal prompts. Subscription and resource
    group are chosen interactively in the latter two modes.
    """
    config = {}
    cloud_provider = 1  # provider id used by the backend (1 = Azure)

    if args.config:
        # logger.info(f"Azure --config argument detected with path: {args.config}")
        config = load_config(args.config)
        if not config:
            console.print("[red]Invalid or missing Azure configuration file.[/red]")
            return

        # Handle name field logic (priority: --name > config name > fallback)
        if args.name:
            config["name"] = args.name.strip()
        if "name" not in config or not config["name"].strip():
            config["name"] = (
                f"Exit Assessment {datetime.now().strftime('%Y%m%d_%H%M%S')}"
            )

    elif args.cli:
        # logger.info("Azure --cli argument detected. Using Azure CLI credentials.")
        # Check if az cli available
        if not is_azure_cli_installed():
            # logger.error("Azure CLI is not installed.")
            console.print(
                "[red]Azure CLI is not installed. Install it from https://aka.ms/install-azure-cli.[/red]"
            )
            return

        # Check if the user is logged in to Azure CLI
        if not is_azure_cli_logged_in():
            # logger.error("User is not logged in to Azure CLI")
            console.print(
                "[red]You are not logged in to Azure CLI. Please run 'az login' and try again.[/red]"
            )
            return

        # Check if the cli token is expired
        if is_azure_cli_token_expired():
            # logger.error("Azure CLI token is expired.")
            console.print("[red]Your Azure CLI token has expired. Please run:[/red]")
            console.print(
                "[bold cyan]az login --scope https://management.azure.com/.default[/bold cyan]"
            )
            return

        try:
            credential = DefaultAzureCredential()
            tenant_id = input("Enter Azure Tenant ID: ").strip()
            subscription_client = SubscriptionClient(credential)
            subscriptions = list(subscription_client.subscriptions.list())
            if not subscriptions:
                logger.error(
                    "No subscriptions found for the provided Azure credentials."
                )
                console.print(
                    "[red]No subscriptions found for the provided credentials.[/red]"
                )
                return
            selected_subscription = select_subscription(subscriptions)
            subscription_id = selected_subscription.subscription_id
            resource_client = ResourceManagementClient(credential, subscription_id)
            resource_groups = list(resource_client.resource_groups.list())
            if not resource_groups:
                logger.error("No resource groups found in the selected subscription.")
                console.print(
                    "[red]No resource groups found in the selected subscription.[/red]"
                )
                return
            resource_group_name = select_resource_group(resource_groups)
            exit_strategy, assessment_type = prompt_required_inputs()
            config = {
                "name": (
                    args.name.strip()
                    if args.name
                    else f"Exit Assessment {datetime.now().strftime('%Y%m%d_%H%M%S')}"
                ),
                "cloudServiceProvider": cloud_provider,
                "exitStrategy": exit_strategy,
                "assessmentType": assessment_type,
                "providerDetails": {
                    "credential": credential,
                    "tenantId": tenant_id,
                    "subscriptionId": subscription_id,
                    "resourceGroupName": resource_group_name,
                },
            }
        except Exception as e:
            logger.error(f"Error during Azure CLI processing: {e}", exc_info=True)
            console.print(f"[red]An error occurred: {e}[/red]")
            # NOTE(review): unlike the other error paths above, this branch
            # does not `return`, so control falls through to run_assessment
            # with a possibly-empty config — confirm whether that is intended.

    else:
        exit_strategy, assessment_type = prompt_required_inputs()
        tenant_id = input("Enter Azure Tenant ID: ").strip()
        client_id = input("Enter Service Principal / Client ID: ").strip()
        client_secret = input("Enter Client Secret: ").strip()
        try:
            # Authenticate using the provided credentials
            credential = ClientSecretCredential(
                tenant_id=tenant_id, client_id=client_id, client_secret=client_secret
            )
            subscription_client = SubscriptionClient(credential)

            # Fetch and prompt the user to select a subscription
            subscriptions = list(subscription_client.subscriptions.list())
            if not subscriptions:
                console.print(
                    "[red]No subscriptions found. Please check your credentials.[/red]"
                )
                return
            selected_subscription = select_subscription(subscriptions)
            subscription_id = selected_subscription.subscription_id

            # Fetch and prompt the user to select a resource group
            resource_client = ResourceManagementClient(credential, subscription_id)
            resource_groups = list(resource_client.resource_groups.list())
            if not resource_groups:
                console.print(
                    "[red]No resource groups found in the selected subscription.[/red]"
                )
                return
            resource_group_name = select_resource_group(resource_groups)

            # Build the configuration
            config = {
                "name": (
                    args.name.strip()
                    if args.name
                    else f"Exit Assessment {datetime.now().strftime('%Y%m%d_%H%M%S')}"
                ),
                "cloudServiceProvider": cloud_provider,
                "exitStrategy": exit_strategy,
                "assessmentType": assessment_type,
                "providerDetails": {
                    "tenantId": tenant_id,
                    "clientId": client_id,
                    "clientSecret": client_secret,
                    "subscriptionId": subscription_id,
                    "resourceGroupName": resource_group_name,
                },
            }
        except Exception as e:
            logger.error(f"Error during manual Azure configuration: {e}", exc_info=True)
            console.print(f"[red]An error occurred: {e}[/red]")
            return

    # Run the Azure assessment pipeline
    # logger.info("Starting Azure assessment pipeline.")
    run_assessment(config, "azure")
Stage", style="bold")

        # Validate the user-supplied config before touching any cloud API.
        try:
            validate_config(config)
            print_step("Configuration successfully validated.", status="ok")
        except ValueError as e:
            print_step("Configuration validation failed.", status="error", logs=str(e))
            return

        # Detect ExitCloud Integration
        mode, jwt = resolve_mode()
        if mode == "online":
            print_step("ExitCloud integration configured.", status="ok")
        else:
            print_step("ExitCloud integration not configured.", status="warning")
            # Overwrite assessment type to basic
            if config["assessmentType"] != 1:
                print_step(
                    "Forcing Basic Assessment due to offline mode.", status="warning"
                )
                config["assessmentType"] = 1

        # Create directories
        try:
            report_path, raw_data_path = create_directory()
            print_step("Directory successfully created.", status="ok")
        except RuntimeError as e:
            print_step("Directory creation failed.", status="error", logs=str(e))
            return

        # Handle the result
        # NOTE(review): this overwrites the provider_name parameter passed by
        # the caller; the parameter value is never used — confirm intended.
        provider_name = (
            "Microsoft Azure"
            if config["cloudServiceProvider"] == 1
            else "AWS" if config["cloudServiceProvider"] == 2 else "Unknown"
        )

        # Stage 1: Verify Credentials
        console.print("-------------------------------------------")
        console.print("Stage #1 - Validate Credentials", style="bold")

        # Test Connection
        connection_success, logs = verify_credentials(
            config["cloudServiceProvider"], config["providerDetails"]
        )
        if connection_success:
            print_step(f"Connecting to {provider_name}...", status="ok")
        else:
            print_step(f"Connecting to {provider_name}...", status="error")
            console.print(f" ↳ {logs}", style="dim")
            logger.error(f"Credential verification failed: {logs}")
            return

        console.print("-------------------------------------------")

        # Stage 2: Test Permissions
        console.print("Stage #2 - Validate Permissions", style="bold")

        # Labels for permission types (Azure role vs. AWS managed policy).
        permission_reader_label = (
            "Reader" if config["cloudServiceProvider"] == 1 else "ViewOnlyAccess"
        )
        permission_cost_label = (
            "Cost Management Reader"
            if config["cloudServiceProvider"] == 1
            else "AWSBillingReadOnlyAccess"
        )

        # Test permissions with spinners
        with console.status("Validating permissions...", spinner="dots"):
            permission_valid, permission_reader, permission_cost, logs = (
                test_permissions(
                    config["cloudServiceProvider"], config["providerDetails"]
                )
            )

        # Output results for permission checks
        if permission_reader:
            print_step(f"Checking {permission_reader_label}...", status="ok")
        else:
            print_step(
                f"Checking {permission_reader_label}...", status="error", logs=logs
            )
        if permission_cost:
            print_step(f"Checking {permission_cost_label}...", status="ok")
        else:
            print_step(
                f"Checking {permission_cost_label}...", status="error", logs=logs
            )

        # Exit if permissions are invalid
        if not permission_valid:
            logger.error(f"Permission validation failed: {logs}")
            return

        console.print("-------------------------------------------")

        # Stage 3: Build Resource Inventory
        console.print("Stage #3 - Build Resource Inventory", style="bold")

        # Use a spinner to indicate progress
        with console.status(
            f"Building resource inventory for {provider_name}...", spinner="dots"
        ):
            result = create_resource_inventory(
                config["cloudServiceProvider"],
                config["providerDetails"],
                report_path,
                raw_data_path,
            )
        if result["success"]:
            print_step(
                f"Building resource inventory for {provider_name}...", status="ok"
            )
        else:
            print_step(
                f"Building resource inventory for {provider_name}...",
                status="error",
                logs=result["logs"],
            )
            return

        console.print("-------------------------------------------")

        # Stage 4: Build Cost Inventory
        console.print("Stage #4 - Build Cost Inventory", style="bold")

        # Use a spinner to indicate progress
        with console.status(
            f"Building cost inventory for {provider_name}...", spinner="dots"
        ):
            cost_result = create_cost_inventory(
                config["cloudServiceProvider"],
                config["providerDetails"],
                report_path,
                raw_data_path,
            )

        # Handle the result
        if cost_result["success"]:
            print_step(f"Building cost inventory for {provider_name}...", status="ok")
        else:
            print_step(
                f"Building cost inventory for {provider_name}...",
                status="error",
                logs=cost_result["logs"],
            )
return console.print("-------------------------------------------") name = ( config.get("name") or f"Exit Assessment {datetime.now().strftime('%Y%m%d_%H%M%S')}" ) # Stage 5 – Online / Offline Risk Assessment if mode == "online": console.print("Stage #5 – Online Risk Assessment", style="bold") sync_result = sync_assessment( name=name, started_at=started_at, report_path=report_path, metadata={ "cloud_service_provider": config["cloudServiceProvider"], "exit_strategy": config["exitStrategy"], "assessment_type": config["assessmentType"], }, mode=mode, token=jwt, ) status = "ok" if sync_result["success"] else "error" print_step("Sync assessment...", status=status, logs=sync_result["logs"]) elif mode == "offline": console.print("Stage #5 – Offline Risk Assessment", style="bold") with console.status("Performing risk assessment...", spinner="dots"): risk_result = perform_risk_assessment( exit_strategy=config["exitStrategy"], report_path=report_path, mode=mode, ) status = "ok" if risk_result["success"] else "error" print_step( "Performing risk assessment...", status=status, logs=risk_result["logs"] ) console.print("-------------------------------------------") # Stage 6: Generate Report console.print("Stage #6 - Generate Report", style="bold") # Use a spinner to indicate progress with console.status("Generating report...", spinner="dots"): report_status = generate_report( config["cloudServiceProvider"], config["providerDetails"], config["exitStrategy"], config["assessmentType"], name, report_path, raw_data_path, ) # Handle the result if report_status["success"]: print_step("Generating report...", status="ok") else: print_step( "Generating report...", status="error", logs=report_status["logs"] ) return # Output the report path after the separator console.print("-------------------------------------------") console.print("Outputs:", style="bold") html_report_path = report_status.get("reports", {}).get("HTML") if html_report_path: console.print(f"HTML Report: 
{html_report_path}", style="cyan") pdf_report_path = report_status.get("reports", {}).get("PDF") if pdf_report_path: console.print(f"PDF Report: {pdf_report_path}", style="cyan") json_report_path = report_status.get("reports", {}).get("JSON") if html_report_path: console.print(f"JSON Report: {json_report_path}", style="cyan") console.print("-------------------------------------------") except Exception as e: console.print(f"[red]Unexpected error: {e}[/red]") def parse_arguments(): parser = argparse.ArgumentParser( description="EscapeCloud - Community Edition", epilog=( "Example usage:\n" " python3 main.py aws # Use manual input for AWS\n" " python3 main.py aws --config config.json # Use a configuration file for AWS\n" " python3 main.py aws --profile PROFILE # Use an AWS CLI profile\n" " python3 main.py aws --name 'DMS System' # Use a pre-defined assessment name\n" " python3 main.py azure # Use manual input for Azure\n" " python3 main.py azure --config config.json # Use a configuration file for Azure\n" " python3 main.py azure --cli # Use Azure CLI credentials\n" " python3 main.py azure --name 'DMS System' # Use a pre-defined assessment name\n" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) subparsers = parser.add_subparsers( dest="cloud_provider", help="Specify the cloud provider (aws or azure)." ) # Subparser for AWS aws_parser = subparsers.add_parser("aws", help="Perform an AWS assessment.") aws_group = aws_parser.add_mutually_exclusive_group(required=False) aws_group.add_argument( "--config", type=str, help="Path to the configuration file (JSON format)." ) aws_group.add_argument( "--profile", type=str, help="AWS profile name to use credentials from ~/.aws/credentials.", ) aws_parser.add_argument( "--name", type=str, help="Assessment Name (Optional / Max. 50 characters)." 
    )

    # Subparser for Azure: --config and --cli are mutually exclusive.
    azure_parser = subparsers.add_parser("azure", help="Perform an Azure assessment.")
    azure_group = azure_parser.add_mutually_exclusive_group(required=False)
    azure_group.add_argument(
        "--config", type=str, help="Path to the configuration file (JSON format)."
    )
    azure_group.add_argument(
        "--cli",
        action="store_true",
        help="Use Azure CLI credentials for authentication.",
    )
    azure_parser.add_argument(
        "--name", type=str, help="Assessment Name (Optional / Max. 50 characters)."
    )

    return parser.parse_args()


def main():
    """CLI entry point: show banner, refresh dataset, dispatch to a provider."""
    # Print ASCII art
    console.print(ascii_art, style="bold cyan")

    # Ensure latest dataset is available before proceeding
    initialize_dataset()

    args = parse_arguments()

    # Check if the cloud provider is specified
    if not args.cloud_provider:
        print_help_message()
        return

    # Dispatch based on provided arguments
    if args.cloud_provider == "aws":
        handle_aws(args)
    elif args.cloud_provider == "azure":
        handle_azure(args)
    else:
        console.print(
            "[red]Invalid command. Use 'aws' or 'azure' as the first argument.[/red]"
        )
        console.print(
            "[green]Run 'python3 main.py --help' for usage instructions.[/green]"
        )


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl+C is an expected way to abort a long assessment; exit cleanly.
        console.print(
            "\n[bold yellow]Operation cancelled by user (Ctrl+C). 
Exiting gracefully.[/bold yellow]"
        )
        # logger.warning("Process interrupted by user via KeyboardInterrupt.")
        sys.exit(0)
    except Exception as e:
        # logger.error(f"An unexpected error occurred: {e}", exc_info=True)
        console.print(f"[red]Unexpected error: {e}[/red]")
        sys.exit(1)


================================================
FILE: publiccode.yml
================================================
publiccodeYmlVersion: "0.4.0"
name: cloudexit
url: https://github.com/escapecloud/cloudexit
landingURL: https://escapecloud.io
platforms:
  - linux
  - mac
  - windows
categories:
  - cloud-management
developmentStatus: beta
softwareType: standalone/backend
description:
  en:
    shortDescription: Open-source tool for cloud exit assessments
    longDescription: Open-source tool for cloud exit assessments that helps
      organizations evaluate risks, dependencies, and alternative strategies
      before leaving a cloud provider.
    features:
      - cloud exit assessment
      - risk management
      - alternative technology analysis
legal:
  license: AGPL-3.0-only
maintenance:
  type: contract
  contractors:
    - name: Bence Daniel Hezso
      until: 2030-12-31
      email: hello@escapecloud.io
localisation:
  localisationReady: false
  availableLanguages:
    - en

================================================
FILE: requirements-dev.txt
================================================
-r requirements.txt
black
ruff

================================================
FILE: requirements.txt
================================================
azure-identity==1.25.3
azure-mgmt-resource==24.0.0
azure-mgmt-authorization==4.0.0
azure-mgmt-costmanagement==4.0.1
boto3==1.43.5
botocore==1.43.5
rich==15.0.0
Jinja2==3.1.6
reportlab==4.5.0
pillow==12.2.0
plotly==6.7.0
kaleido==1.3.0
requests==2.33.1

================================================
FILE: tests/__init__.py
================================================

================================================
FILE: tests/report_fixtures.py
================================================
import shutil
from pathlib 
import Path


def build_report_fixture():
    """Return a populated assessment payload exercising every report section."""
    metadata = {
        "name": "Smoke Test Assessment",
        "cloud_service_provider": 2,
        "exit_strategy": 1,
        "assessment_type": 1,
        "timestamp": "2026-05-07 12:00:00 UTC",
    }
    provider_details = {
        "accessKey": "AKIA_TEST",
        "secretKey": "SECRET_TEST",
        "region": "eu-central-1",
    }
    resource_type_mapping = {
        "101": {
            "id": 101,
            "code": "AWS.EC2.DescribeInstances.Reservations",
            "name": "EC2 Instance",
            "icon": "/icons/misc/no_image.png",
        }
    }
    resource_inventory = [
        {"resource_type": 101, "location": "eu-central-1", "count": 2},
    ]
    # Six consecutive months so the cost chart renders a full window.
    cost_data = [
        {"month": "2025-11-01", "cost": 10.5, "currency": "USD"},
        {"month": "2025-12-01", "cost": 12.0, "currency": "USD"},
        {"month": "2026-01-01", "cost": 14.75, "currency": "USD"},
        {"month": "2026-02-01", "cost": 11.25, "currency": "USD"},
        {"month": "2026-03-01", "cost": 9.0, "currency": "USD"},
        {"month": "2026-04-01", "cost": 13.4, "currency": "USD"},
    ]
    risk_definitions = [
        {
            "id": "1",
            "name": "Limited Alternatives",
            "description": "There are only a few alternatives available.",
            "severity": "high",
        }
    ]
    risk_data = [
        {"resource_type": "101", "risk": "1"},
    ]
    alternatives = [
        {"resource_type": "101", "strategy_type": "1", "alternative_technology": 1},
    ]
    alternative_technologies = [
        {
            "id": 1,
            "product_name": "OpenStack",
            "product_description": "Open source cloud platform.",
            "product_url": "https://www.openstack.org/",
            "open_source": "t",
            "support_plan": "t",
            "status": "t",
        }
    ]
    return {
        "metadata": metadata,
        "provider_details": provider_details,
        "resource_type_mapping": resource_type_mapping,
        "resource_inventory": resource_inventory,
        "cost_data": cost_data,
        "risk_definitions": risk_definitions,
        "risk_data": risk_data,
        "alternatives": alternatives,
        "alternative_technologies": alternative_technologies,
        "exit_strategy": 1,
    }


def build_empty_report_fixture():
    """Return a payload with empty collections to exercise empty-state rendering."""
    metadata = {
        "name": "Empty State Assessment",
        "cloud_service_provider": 2,
        "exit_strategy": 1,
        "assessment_type": 2,
        "timestamp": "2026-05-08 10:00:00 UTC",
    }
    provider_details = {
        "accessKey": "AKIA_EMPTY",
        "secretKey": "SECRET_EMPTY",
        "region": "eu-central-1",
    }
    return {
        "metadata": metadata,
        "provider_details": provider_details,
        "resource_type_mapping": {},
        "resource_inventory": [],
        "cost_data": [],
        "risk_definitions": [],
        "risk_data": [],
        "alternatives": [],
        "alternative_technologies": [],
        "exit_strategy": 1,
    }


def stage_report_assets(report_path: str) -> None:
    """Copy the repo's css/img/icons assets under report_path for PDF rendering."""
    report_assets = Path(report_path) / "assets"
    report_assets.mkdir(parents=True, exist_ok=True)
    source_assets = Path("assets")
    for folder in ("css", "img", "icons"):
        shutil.copytree(
            source_assets / folder,
            report_assets / folder,
            dirs_exist_ok=True,
        )


================================================
FILE: tests/test_report_pipeline.py
================================================
import json
import tempfile
import unittest
from pathlib import Path

from core.utils_report import (
    generate_html_report,
    generate_json_report,
    generate_pdf_report,
)
from core.utils_report_json import transform_cost_inventory_for_json
from tests.report_fixtures import (
    build_empty_report_fixture,
    build_report_fixture,
    stage_report_assets,
)


class ReportPipelineSmokeTests(unittest.TestCase):
    """End-to-end smoke tests for the HTML/JSON/PDF report generators."""

    def test_generate_html_report_creates_expected_output(self):
        fixture = build_report_fixture()
        with tempfile.TemporaryDirectory() as report_dir:
            html_path = generate_html_report(
                report_dir,
                fixture["metadata"],
                fixture["resource_type_mapping"],
                fixture["resource_inventory"],
                fixture["cost_data"],
                None,
                fixture["risk_data"],
                fixture["risk_definitions"],
                fixture["alternatives"],
                fixture["alternative_technologies"],
                fixture["exit_strategy"],
            )
            # Assertions stay inside the context manager: the temp dir (and
            # the generated file) is deleted on exit.
            self.assertTrue(Path(html_path).exists())
            html = Path(html_path).read_text(encoding="utf-8")
            self.assertIn("Smoke Test Assessment", html)
            self.assertIn("Amazon Web Services", html)
            self.assertIn("OpenStack", html)
            self.assertIn("EC2 Instance", html)

    def test_generate_html_report_renders_empty_state_output(self):
        fixture = build_empty_report_fixture()
        with 
tempfile.TemporaryDirectory() as report_dir:
            html_path = generate_html_report(
                report_dir,
                fixture["metadata"],
                fixture["resource_type_mapping"],
                fixture["resource_inventory"],
                fixture["cost_data"],
                None,
                fixture["risk_data"],
                fixture["risk_definitions"],
                fixture["alternatives"],
                fixture["alternative_technologies"],
                fixture["exit_strategy"],
            )
            self.assertTrue(Path(html_path).exists())
            html = Path(html_path).read_text(encoding="utf-8")
            # Empty-state copy must appear and the chart canvases must not.
            self.assertIn("Empty State Assessment", html)
            self.assertIn("No risk data available.", html)
            self.assertIn("No cost data available.", html)
            self.assertIn("No exit score data available.", html)
            self.assertIn("No vendor lock-in score data available.", html)
            self.assertIn("No resources were discovered during the assessment.", html)
            self.assertIn("No alternative technologies are available", html)
            self.assertNotIn('id="risksChart"', html)
            self.assertNotIn('id="costsChart"', html)
            self.assertNotIn('id="exitScoreChart"', html)
            self.assertNotIn('id="vendorLockInScoreChart"', html)

    def test_generate_json_report_creates_expected_structure(self):
        fixture = build_report_fixture()
        with tempfile.TemporaryDirectory() as tmp_dir:
            raw_data_path = Path(tmp_dir) / "raw_data"
            raw_data_path.mkdir()
            json_path = generate_json_report(
                str(raw_data_path),
                fixture["metadata"],
                fixture["resource_type_mapping"],
                fixture["resource_inventory"],
                fixture["cost_data"],
                None,
                fixture["risk_data"],
                fixture["risk_definitions"],
                fixture["alternatives"],
                fixture["alternative_technologies"],
                fixture["exit_strategy"],
            )
            payload = json.loads(Path(json_path).read_text(encoding="utf-8"))
            self.assertEqual(payload["meta"]["name"], "Smoke Test Assessment")
            self.assertEqual(
                payload["data"]["resource_inventory"][0]["resource_name"],
                "EC2 Instance",
            )
            self.assertEqual(
                payload["data"]["cost_inventory"][0]["month"], "2025-11-01"
            )
            self.assertEqual(
                payload["data"]["alternative_technologies"]["1"][0]["product_name"],
                "OpenStack",
            )

    def test_generate_pdf_report_creates_non_empty_file(self):
        fixture = build_report_fixture()
        with tempfile.TemporaryDirectory() as report_dir:
            # PDF rendering needs the css/img/icons assets staged locally.
            stage_report_assets(report_dir)
            pdf_path = generate_pdf_report(
                fixture["provider_details"],
                report_dir,
                fixture["metadata"],
                fixture["resource_type_mapping"],
                fixture["resource_inventory"],
                fixture["cost_data"],
                None,
                fixture["risk_data"],
                fixture["risk_definitions"],
                fixture["alternatives"],
                fixture["alternative_technologies"],
                fixture["exit_strategy"],
            )
            pdf_file = Path(pdf_path)
            self.assertTrue(pdf_file.exists())
            self.assertGreater(pdf_file.stat().st_size, 0)


class ReportTransformTests(unittest.TestCase):
    def test_transform_cost_inventory_for_json_sorts_months(self):
        unsorted_costs = [
            {"month": "2026-03-01", "cost": 9.0, "currency": "USD"},
            {"month": "2026-01-01", "cost": 14.75, "currency": "USD"},
            {"month": "2026-02-01", "cost": 11.25, "currency": "USD"},
        ]
        transformed = transform_cost_inventory_for_json(unsorted_costs)
        self.assertEqual(
            [item["month"] for item in transformed],
            ["2026-01-01", "2026-02-01", "2026-03-01"],
        )


if __name__ == "__main__":
    unittest.main()


================================================
FILE: tests/test_report_transforms.py
================================================
import tempfile
import unittest

from core.utils_report_html import (
    transform_alt_tech_for_html,
    transform_cost_inventory_for_html,
    transform_risk_inventory_for_html,
)
from core.utils_report_json import (
    transform_alt_tech_for_json,
    transform_resource_inventory_for_json,
    transform_risk_inventory_for_json,
)
from core.utils_report_pdf import (
    transform_alt_tech_for_pdf,
    transform_cost_inventory_for_pdf,
    transform_resource_inventory_for_pdf,
    transform_risk_inventory_for_pdf,
)


def build_resource_type_mapping():
    """Two known resource types keyed by their string id."""
    return {
        "101": {
            "id": 101,
            "code": "AWS.EC2.DescribeInstances.Reservations",
            "name": "EC2 Instance",
            "icon": "/icons/misc/no_image.png",
        },
        "202": {
            "id": 202,
            "code": "AWS.S3.ListBuckets.Buckets",
            "name": "S3 Bucket",
"icon": "/icons/misc/no_image.png",
        },
    }


def build_resource_inventory():
    return [
        {"resource_type": 101, "location": "eu-central-1", "count": 2},
        {"resource_type": 202, "location": "eu-central-1", "count": 1},
    ]


def build_risk_definitions():
    return [
        {
            "id": "1",
            "name": "Limited Alternatives",
            "description": "There are only a few alternatives available.",
            "severity": "high",
        },
        {
            "id": "7",
            "name": "Large Service Footprint",
            "description": "The service footprint is broad.",
            "severity": "medium",
        },
    ]


def build_risk_data():
    # resource_type "null" models an assessment-wide risk with no resource.
    return [
        {"resource_type": "101", "risk": "1"},
        {"resource_type": "202", "risk": "1"},
        {"resource_type": "null", "risk": "7"},
    ]


def build_alternatives():
    return [
        {"resource_type": 101, "strategy_type": 1, "alternative_technology": 1},
        {"resource_type": 101, "strategy_type": 3, "alternative_technology": 2},
        {"resource_type": 202, "strategy_type": 1, "alternative_technology": 2},
    ]


def build_alternative_technologies():
    # Entry with status "f" must be filtered out by every transform.
    return [
        {
            "id": 1,
            "product_name": "OpenStack",
            "product_description": "Open source cloud platform.",
            "product_url": "https://www.openstack.org/",
            "open_source": "t",
            "support_plan": "t",
            "status": "t",
        },
        {
            "id": 2,
            "product_name": "MinIO",
            "product_description": "Object storage platform.",
            "product_url": "https://min.io/",
            "open_source": "t",
            "support_plan": "f",
            "status": "t",
        },
        {
            "id": 3,
            "product_name": "Inactive Tech",
            "product_description": "Should be ignored.",
            "product_url": "https://example.com/",
            "open_source": "t",
            "support_plan": "t",
            "status": "f",
        },
    ]


class HtmlTransformTests(unittest.TestCase):
    def test_transform_cost_inventory_for_html_sorts_and_sums_costs(self):
        months, cost_values, total_cost, currency_code, currency_symbol = (
            transform_cost_inventory_for_html(
                [
                    {"month": "2026-02-01", "cost": 11.25, "currency": "USD"},
                    {"month": "2026-01-01", "cost": 14.75, "currency": "USD"},
                ]
            )
        )
        self.assertEqual(months, ["Jan", "Feb"])
        self.assertEqual(cost_values, [14.75, 11.25])
        self.assertEqual(total_cost, 26.0)
        self.assertEqual(currency_code, "USD")
        self.assertEqual(currency_symbol, "$")

    def test_transform_risk_inventory_for_html_counts_overall_and_resource_risks(self):
        resource_inventory = {
            "101": {"name": "EC2 Instance"},
            "202": {"name": "S3 Bucket"},
        }
        risks, severity_counts = transform_risk_inventory_for_html(
            build_risk_data(),
            build_risk_definitions(),
            resource_inventory,
        )
        self.assertEqual([risk["severity"] for risk in risks], ["high", "medium"])
        self.assertEqual(risks[0]["impacted_resources_count"], 2)
        self.assertCountEqual(
            risks[0]["impacted_resources"], ["EC2 Instance", "S3 Bucket"]
        )
        self.assertIsNone(risks[1]["impacted_resources_count"])
        self.assertEqual(severity_counts, {"high": 1, "medium": 1, "low": 0})

    def test_transform_alt_tech_for_html_filters_by_strategy_and_status(self):
        transformed = transform_alt_tech_for_html(
            build_resource_inventory(),
            build_alternatives(),
            build_alternative_technologies(),
            exit_strategy=1,
        )
        self.assertEqual(len(transformed), 2)
        self.assertEqual(transformed[0]["product_name"], "OpenStack")
        self.assertEqual(transformed[1]["product_name"], "MinIO")
        self.assertTrue(transformed[0]["open_source"])
        self.assertFalse(transformed[1]["support_plan"])


class JsonTransformTests(unittest.TestCase):
    def test_transform_resource_inventory_for_json_maps_names_and_codes(self):
        transformed = transform_resource_inventory_for_json(
            build_resource_inventory(),
            build_resource_type_mapping(),
        )
        self.assertEqual(transformed[0]["resource_name"], "EC2 Instance")
        self.assertEqual(
            transformed[1]["code"],
            "AWS.S3.ListBuckets.Buckets",
        )

    def test_transform_risk_inventory_for_json_maps_impacted_resource_ids(self):
        transformed = transform_risk_inventory_for_json(
            build_risk_data(),
            build_risk_definitions(),
            build_resource_inventory(),
        )
        transformed_by_id = {item["id"]: item for item in transformed}
        self.assertCountEqual(transformed_by_id["1"]["impacted_resources"], [1, 2])
        self.assertEqual(transformed_by_id["1"]["impacted_resources_count"], 2)
self.assertIsNone(transformed_by_id["7"]["impacted_resources_count"])

    def test_transform_alt_tech_for_json_groups_by_resource_id(self):
        transformed = transform_alt_tech_for_json(
            build_resource_inventory(),
            build_alternatives(),
            build_alternative_technologies(),
            exit_strategy=1,
        )
        self.assertEqual(list(transformed.keys()), [1, 2])
        self.assertEqual(transformed[1][0]["product_name"], "OpenStack")
        self.assertEqual(transformed[2][0]["product_name"], "MinIO")


class PdfTransformTests(unittest.TestCase):
    def test_transform_cost_inventory_for_pdf_limits_to_last_six_months(self):
        # Seven months in: the oldest (Oct) must be dropped from the output.
        months, costs, currency_symbol = transform_cost_inventory_for_pdf(
            [
                {"month": "2025-10-01", "cost": 8.0, "currency": "USD"},
                {"month": "2025-11-01", "cost": 10.5, "currency": "USD"},
                {"month": "2025-12-01", "cost": 12.0, "currency": "USD"},
                {"month": "2026-01-01", "cost": 14.75, "currency": "USD"},
                {"month": "2026-02-01", "cost": 11.25, "currency": "USD"},
                {"month": "2026-03-01", "cost": 9.0, "currency": "USD"},
                {"month": "2026-04-01", "cost": 13.4, "currency": "USD"},
            ]
        )
        self.assertEqual(months, ["Nov", "Dec", "Jan", "Feb", "Mar", "Apr"])
        self.assertEqual(costs, [10.5, 12.0, 14.75, 11.25, 9.0, 13.4])
        self.assertEqual(currency_symbol, "$")

    def test_transform_risk_inventory_for_pdf_counts_resource_backed_risks(self):
        risks, severity_counts = transform_risk_inventory_for_pdf(
            build_risk_data(),
            build_risk_definitions(),
            build_resource_inventory(),
        )
        risks_by_name = {item["name"]: item for item in risks}
        self.assertEqual(
            risks_by_name["Limited Alternatives"]["impacted_resources_count"], 2
        )
        self.assertEqual(
            risks_by_name["Large Service Footprint"]["impacted_resources_count"], 0
        )
        self.assertEqual(severity_counts, {"high": 1, "medium": 1, "low": 0})

    def test_transform_resource_inventory_for_pdf_builds_report_relative_icon_paths(
        self,
    ):
        with tempfile.TemporaryDirectory() as report_dir:
            transformed = transform_resource_inventory_for_pdf(
                build_resource_inventory(),
                build_resource_type_mapping(),
                report_dir,
            )
            self.assertEqual(transformed[0]["resource_name"], "EC2 Instance")
            self.assertTrue(
                transformed[0]["icon_url"].endswith("/assets/icons/misc/no_image.png")
            )

    def test_transform_alt_tech_for_pdf_counts_matching_alternatives(self):
        with tempfile.TemporaryDirectory() as report_dir:
            transformed = transform_alt_tech_for_pdf(
                build_resource_inventory(),
                build_resource_type_mapping(),
                build_alternatives(),
                build_alternative_technologies(),
                exit_strategy=1,
                report_path=report_dir,
            )
            self.assertEqual(transformed[0]["count"], 1)
            self.assertEqual(transformed[1]["count"], 1)
            self.assertTrue(
                transformed[0]["icon_url"].endswith("/assets/icons/misc/no_image.png")
            )


if __name__ == "__main__":
    unittest.main()


================================================
FILE: tests/test_utils_and_main.py
================================================
import json
import tempfile
import unittest
from pathlib import Path
from unittest.mock import patch

import main
from utils.utils import load_config


class LoadConfigTests(unittest.TestCase):
    """load_config returns parsed JSON on success and None on any failure."""

    def test_load_config_returns_parsed_json(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            config_path = Path(tmp_dir) / "config.json"
            expected = {
                "cloudServiceProvider": 2,
                "assessmentType": 1,
                "providerDetails": {"region": "eu-central-1"},
            }
            config_path.write_text(json.dumps(expected), encoding="utf-8")
            self.assertEqual(load_config(str(config_path)), expected)

    def test_load_config_returns_none_for_missing_file(self):
        with patch("utils.utils.console.print") as mock_print:
            result = load_config("/tmp/does-not-exist-config.json")
        self.assertIsNone(result)
        mock_print.assert_called_once()

    def test_load_config_returns_none_for_invalid_json(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            config_path = Path(tmp_dir) / "config.json"
            config_path.write_text("{invalid json", encoding="utf-8")
            with patch("utils.utils.console.print") as mock_print:
                result = load_config(str(config_path))
            self.assertIsNone(result)
            mock_print.assert_called_once()
class RunAssessmentPreValidationTests(unittest.TestCase): def test_invalid_config_stops_before_pipeline_side_effects(self): config = { "assessmentType": 99, "cloudServiceProvider": 2, "providerDetails": {}, } with ( patch( "main.validate_config", side_effect=ValueError("bad config") ) as mock_validate, patch("main.resolve_mode") as mock_resolve_mode, patch("main.create_directory") as mock_create_directory, patch("main.verify_credentials") as mock_verify_credentials, patch("main.print_step") as mock_print_step, patch("main.console.print"), ): result = main.run_assessment(config, "aws") self.assertIsNone(result) mock_validate.assert_called_once_with(config) mock_print_step.assert_called_once_with( "Configuration validation failed.", status="error", logs="bad config" ) mock_resolve_mode.assert_not_called() mock_create_directory.assert_not_called() mock_verify_credentials.assert_not_called() if __name__ == "__main__": unittest.main() ================================================ FILE: tests/test_validate.py ================================================ import unittest from utils.validate import validate_config, validate_region def build_aws_config(): return { "name": "Example Assessment", "assessmentType": 1, "cloudServiceProvider": 2, "exitStrategy": 1, "providerDetails": { "accessKey": "AKIA_TEST", "secretKey": "SECRET_TEST", "region": "eu-central-1", }, } def build_azure_config(): return { "name": "Example Assessment", "assessmentType": 2, "cloudServiceProvider": 1, "exitStrategy": 3, "providerDetails": { "tenantId": "tenant-id", "clientId": "client-id", "clientSecret": "client-secret", "subscriptionId": "subscription-id", "resourceGroupName": "resource-group", }, } class ValidateRegionTests(unittest.TestCase): def test_accepts_known_region(self): self.assertIsNone(validate_region("eu-central-1")) def test_rejects_unknown_region(self): with self.assertRaisesRegex(ValueError, "Invalid AWS region"): validate_region("moon-central-1") class 
ValidateConfigTests(unittest.TestCase): def test_accepts_valid_aws_config(self): self.assertTrue(validate_config(build_aws_config())) def test_accepts_valid_azure_service_principal_config(self): self.assertTrue(validate_config(build_azure_config())) def test_accepts_valid_azure_cli_config(self): config = build_azure_config() config["providerDetails"] = { "credential": object(), "tenantId": "tenant-id", "subscriptionId": "subscription-id", "resourceGroupName": "resource-group", } self.assertTrue(validate_config(config)) def test_rejects_azure_config_without_client_credentials(self): config = build_azure_config() del config["providerDetails"]["clientId"] del config["providerDetails"]["clientSecret"] with self.assertRaisesRegex( ValueError, "Missing required fields in providerDetails" ): validate_config(config) def test_rejects_invalid_assessment_type(self): config = build_aws_config() config["assessmentType"] = 9 with self.assertRaisesRegex(ValueError, "Invalid assessmentType"): validate_config(config) def test_rejects_non_integer_top_level_fields(self): config = build_aws_config() config["assessmentType"] = "basic" with self.assertRaisesRegex(ValueError, "must be integers"): validate_config(config) def test_rejects_invalid_name_characters(self): config = build_aws_config() config["name"] = "Bad/Name" with self.assertRaisesRegex( ValueError, "Assessment name contains invalid characters" ): validate_config(config) def test_rejects_too_long_name(self): config = build_aws_config() config["name"] = "a" * 51 with self.assertRaisesRegex(ValueError, "cannot exceed 50 characters"): validate_config(config) def test_rejects_aws_config_with_invalid_region(self): config = build_aws_config() config["providerDetails"]["region"] = "invalid-region" with self.assertRaisesRegex(ValueError, "Invalid AWS region"): validate_config(config) if __name__ == "__main__": unittest.main() ================================================ FILE: utils/aws.py 
================================================ # utils/aws.py import logging import shutil import subprocess logger = logging.getLogger("main.utils.aws") def is_aws_cli_installed() -> bool: return shutil.which("aws") is not None def is_aws_profile_valid(profile: str) -> bool: try: subprocess.run( ["aws", "configure", "list", "--profile", profile], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ) return True except subprocess.CalledProcessError: return False ================================================ FILE: utils/azure.py ================================================ # utils/azure.py import logging import shutil import subprocess from typing import List, Any from rich.console import Console from azure.identity import AzureCliCredential from azure.core.exceptions import ClientAuthenticationError logger = logging.getLogger("main.utils.azure") console = Console() def is_azure_cli_installed() -> bool: return shutil.which("az") is not None def is_azure_cli_logged_in() -> bool: try: # Run the 'az account show' command to check if the user is logged in subprocess.run( ["az", "account", "show"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, ) return True except subprocess.CalledProcessError: return False def is_azure_cli_token_expired() -> bool: credential = AzureCliCredential() try: credential.get_token("https://management.azure.com/.default") return False # Token is valid except ClientAuthenticationError as e: if "AADSTS700082" in str(e): return True # Token expired return False def select_subscription(subscriptions: List[Any]) -> Any: # logger.info("Listing available subscriptions for selection.") console.print("Available Subscriptions:") for idx, sub in enumerate(subscriptions, start=1): console.print(f"{idx}. 
{sub.display_name} ({sub.subscription_id})") while True: try: selection = int(input("Select a subscription by number: ").strip()) if not (1 <= selection <= len(subscriptions)): raise ValueError("Invalid subscription selection.") selected_subscription = subscriptions[selection - 1] # logger.info(f"Subscription selected: {selected_subscription.display_name} ({selected_subscription.subscription_id})") return selected_subscription except ValueError as e: logger.warning(f"Invalid subscription selection: {e}") console.print(f"[red]{e} Please select a valid number.[/red]") def select_resource_group(resource_groups: List[Any]) -> str: # logger.info("Listing available resource groups for selection.") console.print("Available Resource Groups:") for idx, rg in enumerate(resource_groups, start=1): console.print(f"{idx}. {rg.name}") while True: try: selection = int(input("Select a resource group by number: ").strip()) if not (1 <= selection <= len(resource_groups)): raise ValueError("Invalid resource group selection.") selected_resource_group = resource_groups[selection - 1].name # logger.info(f"Resource Group selected: {selected_resource_group}") return selected_resource_group except ValueError as e: logger.warning(f"Invalid resource group selection: {e}") console.print(f"[red]{e} Please select a valid number.[/red]") ================================================ FILE: utils/connection.py ================================================ # utils/connection.py from __future__ import annotations import logging import requests from typing import Tuple, Optional logger = logging.getLogger("main.utils.connection") try: import config except ModuleNotFoundError: config = None _AUTH_PATH = "/api/v1/auth/token/" def _build_url(host: str) -> str: host = host.strip().rstrip("/") if not host.startswith("http"): host = f"https://{host}" return f"{host}{_AUTH_PATH}" def get_jwt_token( host: str | None = None, key: str | None = None, *, timeout: int = 10 ) -> Optional[str]: host = host or 
getattr(config, "HOST", "") if config else "" key = key or getattr(config, "KEY", "") if config else "" if not host: logger.debug("HOST empty – skipping ExitCloud authentication.") return None if not key: logger.debug("KEY empty – skipping ExitCloud authentication.") return None url = _build_url(host) headers = {"Authorization": f"Bearer {key}"} try: resp = requests.post(url, headers=headers, timeout=timeout) resp.raise_for_status() data = resp.json() token = ( data.get("access_token") or data.get("token") or data.get("access") or data.get("jwt") ) if token: return token logger.error( "Authentication succeeded but token field missing in response: %s", data ) except requests.RequestException as exc: logger.error("EscapeCloud authentication request failed: %s", exc) except ValueError: logger.error("EscapeCloud authentication response was not valid JSON.") return None def resolve_mode() -> Tuple[str, Optional[str]]: host = getattr(config, "HOST", "") if config else "" key = getattr(config, "KEY", "") if config else "" if not host: logger.debug("HOST empty – running in offline mode.") return "offline", None if not key: logger.debug("KEY empty – running in offline mode.") return "offline", None token = get_jwt_token(host=host, key=key) if token: return "online", token logger.debug("ExitCloud auth failed – falling back to offline mode.") return "offline", None ================================================ FILE: utils/constants.py ================================================ # utils/constants.py REGION_CHOICES = [ ("us-east-1", "us-east-1 (N. Virginia)"), ("us-east-2", "us-east-2 (Ohio)"), ("us-west-1", "us-west-1 (N. 
California)"), ("us-west-2", "us-west-2 (Oregon)"), ("af-south-1", "af-south-1 (Cape Town)"), ("ap-east-1", "ap-east-1 (Hong Kong)"), ("ap-south-1", "ap-south-1 (Mumbai)"), ("ap-northeast-1", "ap-northeast-1 (Tokyo)"), ("ap-northeast-2", "ap-northeast-2 (Seoul)"), ("ap-northeast-3", "ap-northeast-3 (Osaka)"), ("ap-southeast-1", "ap-southeast-1 (Singapore)"), ("ap-southeast-2", "ap-southeast-2 (Sydney)"), ("ca-central-1", "ca-central-1 (Central)"), ("eu-central-1", "eu-central-1 (Frankfurt)"), ("eu-west-1", "eu-west-1 (Ireland)"), ("eu-west-2", "eu-west-2 (London)"), ("eu-west-3", "eu-west-3 (Paris)"), ("eu-south-1", "eu-south-1 (Milan)"), ("eu-north-1", "eu-north-1 (Stockholm)"), ("me-south-1", "me-south-1 (Bahrain)"), ("sa-east-1", "sa-east-1 (São Paulo)"), ] REQUIRED_FIELDS_AZURE = [ "clientId", "clientSecret", "tenantId", "subscriptionId", "resourceGroupName", ] REQUIRED_FIELDS_AWS = ["accessKey", "secretKey", "region"] ================================================ FILE: utils/data.py ================================================ # utils/data.py import os import gzip import shutil import hashlib import time import requests from typing import Optional from datetime import datetime, timedelta from pathlib import Path from requests.exceptions import RequestException, ConnectionError, Timeout # Constants DATASET_FOLDER = Path("datasets") REMOTE_STORAGE_URL = "https://cloudexit-oss-data-eu.fsn1.your-objectstorage.com" def get_monday_date() -> str: now = datetime.utcnow() monday = now - timedelta(days=now.weekday()) if now.weekday() == 0 and now.hour < 8: last_monday = monday - timedelta(days=7) return last_monday.strftime("cloudexit-%Y-%m-%d.db.gz") else: return monday.strftime("cloudexit-%Y-%m-%d.db.gz") def compute_file_hash(filepath: str) -> str: hash_sha256 = hashlib.sha256() with open(filepath, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_sha256.update(chunk) return hash_sha256.hexdigest() def download_file(url: str, destination: str, 
retries: int = 3, delay: int = 5) -> bool: for attempt in range(retries): try: response = requests.get(url, stream=True, timeout=30) response.raise_for_status() with open(destination, "wb") as f: shutil.copyfileobj(response.raw, f) print(f"[INFO] Download successful: {destination}") return True except ConnectionError: print( f"[ERROR] Connection failed while downloading {url}. Retrying ({attempt + 1}/{retries})..." ) except Timeout: print( f"[ERROR] Request timed out while downloading {url}. Retrying ({attempt + 1}/{retries})..." ) except RequestException as e: print(f"[ERROR] Failed to download {url}: {e}") break time.sleep(delay) print(f"[ERROR] Unable to download file after {retries} attempts: {url}") return False def fetch_remote_checksum( checksum_url: str, retries: int = 3, delay: int = 5 ) -> Optional[str]: for attempt in range(retries): try: response = requests.get(checksum_url, timeout=10) response.raise_for_status() return response.text.strip().split()[0] except ConnectionError: print( f"[ERROR] Connection failed when fetching {checksum_url}. Retrying ({attempt + 1}/{retries})..." ) except Timeout: print( f"[ERROR] Request timed out when fetching {checksum_url}. Retrying ({attempt + 1}/{retries})..." 
) except RequestException as e: print(f"[ERROR] Failed to fetch {checksum_url}: {e}") break time.sleep(delay) print(f"[ERROR] Unable to fetch remote checksum after {retries} attempts.") return None def initialize_dataset() -> None: DATASET_FOLDER.mkdir(exist_ok=True) latest_file = get_monday_date() latest_file_url = f"{REMOTE_STORAGE_URL}/{latest_file}" latest_checksum_url = f"{REMOTE_STORAGE_URL}/{latest_file}.sha256" latest_symlink_file = f"{REMOTE_STORAGE_URL}/cloudexit-latest.db.gz" latest_symlink_checksum_url = f"{REMOTE_STORAGE_URL}/cloudexit-latest.db.gz.sha256" local_db_path = DATASET_FOLDER / "data.db" local_compressed_path = DATASET_FOLDER / latest_file # Fetch checksum for the date-based file remote_checksum = fetch_remote_checksum(latest_checksum_url) if not remote_checksum: print(f"[INFO] Unable to fetch remote checksum from {latest_checksum_url}.") print(f"[INFO] Trying latest symlink from {latest_symlink_checksum_url}...") remote_checksum = fetch_remote_checksum(latest_symlink_checksum_url) latest_file_url = latest_symlink_file latest_file = "cloudexit-latest.db.gz" local_compressed_path = DATASET_FOLDER / latest_file if not remote_checksum: print("[ERROR] Unable to fetch any remote checksum. Skipping update.") else: # Check if local compressed file exists if local_compressed_path.exists(): local_checksum = compute_file_hash(local_compressed_path) if local_checksum == remote_checksum: print("[INFO] Local dataset is up-to-date. No download needed.") return else: print( "[INFO] Local dataset is outdated. Removing old files and downloading new dataset..." ) # Remove all old compressed and extracted files for file in DATASET_FOLDER.glob("cloudexit-*.db.gz"): os.remove(file) if local_db_path.exists(): os.remove(local_db_path) # Download and extract dataset if download_file(latest_file_url, local_compressed_path): print( f"[INFO] Download successful. Extracting dataset from {latest_file}..." 
) with gzip.open(local_compressed_path, "rb") as f_in, open( local_db_path, "wb" ) as f_out: shutil.copyfileobj(f_in, f_out) print("[INFO] Dataset updated successfully.") if not any(DATASET_FOLDER.iterdir()): print("[ERROR] Dataset folder is empty! Cannot proceed without data.") exit(1) ================================================ FILE: utils/sync.py ================================================ # utils/sync.py from __future__ import annotations import logging import requests import config from typing import Optional, Dict, Any from utils.auth import get_jwt_token logger = logging.getLogger("main.utils.sync") _ASSESS_PATH = "/api/v1/assessments/" def _build_url(host: str) -> str: host = host.strip().rstrip("/") if not host.startswith("http"): host = f"https://{host}" return f"{host}{_ASSESS_PATH}" def submit_assessment( payload: Dict[str, Any], *, host: str | None = None, key: str | None = None, timeout: int = 10, ) -> Optional[requests.Response]: host = host or getattr(config, "HOST", "") if config else "" if not host: logger.warning("HOST not configured – skipping assessment sync.") return None token = get_jwt_token(host=host, key=key) if key else get_jwt_token(host=host) if not token: logger.warning("Could not obtain JWT – skipping assessment sync.") return None url = _build_url(host) headers = { "Authorization": f"Bearer {token}", "Content-Type": "application/json", } try: resp = requests.post(url, headers=headers, json=payload, timeout=timeout) logger.info("POST %s – status %s", url, resp.status_code) return resp except requests.RequestException as exc: logger.error("Assessment POST failed: %s", exc) return None ================================================ FILE: utils/utils.py ================================================ # utils/utils.py import os import logging import json from typing import Optional, Tuple, Dict, Any from rich.console import Console from rich.style import Style from time import sleep from datetime import datetime logger = 
logging.getLogger("main.utils") console = Console() def load_config(file_path: str) -> Optional[Dict[str, Any]]: try: # logger.info(f"Attempting to load config file from {file_path}") with open(file_path, "r") as f: config = json.load(f) # logger.info("Config file loaded successfully.") return config except Exception as e: logger.error(f"Error loading config file: {e}", exc_info=True) console.print(f"[red]Error loading config file: {e}[/red]") return None def prompt_required_inputs() -> Tuple[int, int]: while True: try: exit_strategy = int( input( "Enter Exit Strategy (1 for 'Repatriation to On-Premises', 3 for 'Migration to Alternate Cloud'): " ).strip() ) if exit_strategy not in [1, 3]: raise ValueError("Invalid exit strategy.") # logger.info(f"Exit Strategy selected: {exit_strategy}") break except ValueError as e: logger.warning(f"Invalid exit strategy input: {e}") console.print(f"[red]{e} Please enter 1 or 3.[/red]") while True: try: assessment_type = int( input( "Enter Assessment Type (1 for 'Basic', 2 for 'Standard'): " ).strip() ) if assessment_type not in [1, 2]: raise ValueError("Invalid assessment type.") # logger.info(f"Assessment Type selected: {assessment_type}") break except ValueError as e: logger.warning(f"Invalid assessment type input: {e}") console.print(f"[red]{e} Please enter 1 or 2.[/red]") return exit_strategy, assessment_type def print_step( description: str, status: str = "pending", logs: Optional[str] = None ) -> None: # Define styles for statuses ok_style = Style(color="green", bold=True) error_style = Style(color="red", bold=True) warning_style = Style(color="yellow", bold=True) # Map statuses to their visual representation status_map = { "ok": "[ ok ]", "error": "[ error ]", "warning": "[ warn ]", "pending": "[ ... 
]", } # Handle the pending status with a spinner if status == "pending": with console.status( f"{description:<50} [yellow]{status_map['pending']}[/yellow]", spinner="dots", ): sleep(2) print_step(description, status="ok") elif status == "ok": console.print(f"{description:<50} {status_map['ok']}", style=ok_style) elif status == "warning": console.print(f"{description:<50} {status_map['warning']}", style=warning_style) if logs: console.print(f" ↳ {logs}", style="dim") elif status == "error": console.print(f"{description:<50} {status_map['error']}", style=error_style) if logs: console.print(f" ↳ {logs}", style="dim") ascii_art = r""" _ _ _ _ | | | | (_) | ___| | ___ _ _ __| | _____ ___| |_ / __| |/ _ \| | | |/ _` |/ _ \ \/ / | __| | (__| | (_) | |_| | (_| | __/> <| | |_ \___|_|\___/ \__,_|\__,_|\___/_/\_\_|\__| """ def create_directory(base_path="reports"): # Generate the main directory with a timestamp timestamp = datetime.now().strftime("%Y%m%d%H%M%S") directory_path = os.path.join(base_path, timestamp) # Create the main directory os.makedirs(directory_path, exist_ok=True) # Create the raw_data subdirectory within the main directory raw_data_path = os.path.join(directory_path, "raw_data") os.makedirs(raw_data_path, exist_ok=True) return directory_path, raw_data_path def print_help_message(): console.print("EscapeCloud - Community Edition", style="bold cyan") console.print("[green]Run the script with one of the following options:[/green]\n") console.print(" python3 main.py aws") console.print(" python3 main.py aws --config config/aws.json") console.print(" python3 main.py aws --profile PROFILE") console.print(" python3 main.py aws --name 'DMS System' ") console.print(" python3 main.py azure") console.print(" python3 main.py azure --config config/azure.json") console.print(" python3 main.py azure --cli") console.print(" python3 main.py azure --name 'DMS System'") ================================================ FILE: utils/validate.py 
================================================ # utils/validate.py from typing import Dict, Any from .constants import REGION_CHOICES, REQUIRED_FIELDS_AZURE, REQUIRED_FIELDS_AWS def validate_region(region: str) -> None: valid_regions = [choice[0] for choice in REGION_CHOICES] if region not in valid_regions: raise ValueError(f"Invalid AWS region. Choose from: {', '.join(valid_regions)}") def validate_config(config: Dict[str, Any]) -> bool: try: # Cast key values to integers to handle string input gracefully assessment_type = int(config.get("assessmentType", 0)) cloud_service_provider = int(config.get("cloudServiceProvider", 0)) exit_strategy = int(config.get("exitStrategy", 0)) except ValueError: raise ValueError( "Invalid input: assessmentType, cloudServiceProvider, and exitStrategy must be integers." ) # Validate assessmentType if assessment_type not in [1, 2]: raise ValueError("Invalid assessmentType. Must be 1 (Basic) or 2 (Standard).") # Validate cloudServiceProvider if cloud_service_provider not in [1, 2]: raise ValueError("Invalid cloudServiceProvider. Must be 1 (Azure) or 2 (AWS).") # Validate exitStrategy if exit_strategy not in [1, 2, 3]: raise ValueError( "Invalid exitStrategy. Must be 1 (Repatriation to On-Premises), 2 (Hybrid Cloud Adoption) or 3 (Migration to Alternate Cloud)." ) # Validate name name = config.get("name", "").strip() if len(name) > 50: raise ValueError("Assessment name cannot exceed 50 characters.") if not all(c.isalnum() or c in " ._-()" for c in name): raise ValueError( "Assessment name contains invalid characters. Only letters, numbers, spaces, . _ - ( ) are allowed." 
) # Validate providerDetails based on cloudServiceProvider provider_details = config.get("providerDetails", {}) if cloud_service_provider == 1: # Azure # Skip validation of clientId and clientSecret if using CLI credentials if provider_details.get("credential") is not None: required_fields = ["tenantId", "subscriptionId", "resourceGroupName"] else: required_fields = REQUIRED_FIELDS_AZURE missing_fields = [ field for field in required_fields if field not in provider_details ] elif cloud_service_provider == 2: # AWS missing_fields = [ field for field in REQUIRED_FIELDS_AWS if field not in provider_details ] if "region" in provider_details: validate_region(provider_details["region"]) else: raise ValueError( f"Invalid cloudServiceProvider: {cloud_service_provider}. Supported values: 1 (Azure), 2 (AWS)." ) if missing_fields: raise ValueError( f"Missing required fields in providerDetails: {', '.join(missing_fields)}" ) return True