Repository: UpstageAI/dataverse Branch: main Commit: a0adedc316a4 Files: 148 Total size: 464.3 KB Directory structure: gitextract_q82io2no/ ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── 1-bug-report.yml │ │ ├── 2-feature-request.yml │ │ ├── 3-documentation-improve.yml │ │ └── config.yml │ └── pull_request_template.md ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── LICENSE ├── Makefile ├── README.md ├── contribution/ │ └── CONTRIBUTING.md ├── dataverse/ │ ├── README.md │ ├── __init__.py │ ├── api/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── cli.py │ │ └── emr.py │ ├── config/ │ │ ├── README.md │ │ ├── __init__.py │ │ └── interface.py │ ├── etl/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── __sample/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── ducky.py │ │ │ └── github.py │ │ ├── bias/ │ │ │ ├── README.md │ │ │ └── __init__.py │ │ ├── cleaning/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── char.py │ │ │ ├── document.py │ │ │ ├── html.py │ │ │ ├── korean.py │ │ │ ├── length.py │ │ │ ├── number.py │ │ │ ├── table.py │ │ │ └── unicode.py │ │ ├── data_ingestion/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── arrow.py │ │ │ ├── common_crawl.py │ │ │ ├── csv.py │ │ │ ├── cultura_x.py │ │ │ ├── huggingface.py │ │ │ ├── parquet.py │ │ │ ├── red_pajama.py │ │ │ ├── slim_pajama.py │ │ │ └── test.py │ │ ├── data_save/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── aws.py │ │ │ ├── huggingface.py │ │ │ └── parquet.py │ │ ├── decontamination/ │ │ │ ├── README.md │ │ │ └── __init__.py │ │ ├── deduplication/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── common_crawl.py │ │ │ ├── exact.py │ │ │ ├── minhash.py │ │ │ └── polyglot.py │ │ ├── pii/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── card.py │ │ │ └── nin.py │ │ ├── pipeline.py │ │ ├── quality/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ └── language.py │ │ ├── registry.py │ │ ├── toxicity/ │ │ │ ├── README.md │ │ │ └── __init__.py │ │ └── utils/ │ │ ├── README.md │ │ 
├── __init__.py │ │ ├── log.py │ │ ├── sampling.py │ │ └── statistics.py │ ├── lab/ │ │ ├── README.md │ │ └── __init__.py │ ├── tests/ │ │ ├── conftest.py │ │ ├── test_cleaning_accent.py │ │ ├── test_cleaning_char.py │ │ ├── test_cleaning_document.py │ │ ├── test_cleaning_html.py │ │ ├── test_cleaning_korean.py │ │ ├── test_cleaning_length.py │ │ ├── test_cleaning_number.py │ │ ├── test_cleaning_table.py │ │ ├── test_cleaning_unicode.py │ │ ├── test_deduplication_common_crawl.py │ │ ├── test_deduplication_exact.py │ │ ├── test_deduplication_minhash.py │ │ ├── test_deduplication_polyglot.py │ │ ├── test_pii_card.py │ │ └── test_pii_nin.py │ └── utils/ │ ├── README.md │ ├── __init__.py │ ├── analyze/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── pip.py │ │ └── python.py │ ├── api/ │ │ ├── README.md │ │ ├── __init__.py │ │ └── aws.py │ ├── format/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── huggingface.py │ │ └── ufl.py │ └── setting/ │ ├── README.md │ ├── __init__.py │ ├── system.py │ └── user.py ├── docs/ │ ├── Makefile │ ├── make.bat │ └── source/ │ ├── citation.rst │ ├── conf.py │ ├── config/ │ │ └── config.interface.rst │ ├── etl/ │ │ ├── etl.bias.rst │ │ ├── etl.cleaning.rst │ │ ├── etl.data_ingestion.rst │ │ ├── etl.data_save.rst │ │ ├── etl.decontamination.rst │ │ ├── etl.deduplication.rst │ │ ├── etl.pii.rst │ │ ├── etl.pipeline.rst │ │ ├── etl.quality.rst │ │ ├── etl.registry.rst │ │ ├── etl.rst │ │ ├── etl.toxicity.rst │ │ └── etl.utils.rst │ ├── index.rst │ ├── installation.rst │ ├── quickstart.rst │ └── requirements.txt ├── examples/ │ ├── README.md │ └── etl/ │ ├── ETL_01_how_to_run.ipynb │ ├── ETL_02_one_cycle.ipynb │ ├── ETL_03_create_new_etl_process.ipynb │ ├── ETL_04_add_new_etl_process.ipynb │ ├── ETL_05_test_etl_process.ipynb │ ├── ETL_06_scaleout_with_EMR.ipynb │ ├── EX_use_common_crawl_data.ipynb │ ├── EX_use_pyspark_ui.ipynb │ └── README.md ├── requirements.txt └── setup.py ================================================ FILE CONTENTS 
================================================ ================================================ FILE: .github/ISSUE_TEMPLATE/1-bug-report.yml ================================================ name: "🐛 Bug Report" description: Create a new ticket for a bug. title: "🐛 [BUG] - " labels: [ "bug" ] body: - type: textarea id: environment-setting attributes: label: "Environment Settings" description: Java, Pyspark version, Python version, ... placeholder: Let us explain your environment settings to reproduce validations: required: true - type: textarea id: expected-behavior attributes: label: "Expected Behavior" placeholder: A clear and concise description of what you would expect to happen. validations: required: true - type: textarea id: actual-behavior attributes: label: "Actual Behavior" placeholder: A clear and concise description of what actually happened. - type: textarea id: reproduction attributes: label: Reproduction description: | Please enter an explicit steps to reproduce your problem. If you have any code snippets, error messages, and etc., please provide them here. placeholder: | Steps to reproduce: 1. 2. 3. 4. validations: required: true ================================================ FILE: .github/ISSUE_TEMPLATE/2-feature-request.yml ================================================ name: "🚀 Feature Request" description: Suggesting new desired feature and enhancement of existing feature title: "🚀 [REQUEST] - <title>" labels: [ "enhancement", "feature" ] body: - type: textarea id: feature-request attributes: label: Feature request description: | Please describe the feature you want to add or needs to be enhanced. If you have any related paper or code, please provide us. validations: required: true - type: textarea id: context validations: required: false attributes: label: Context description: | Please let us know your motivation or additional context for this suggestion. 
Knowing the reason why it needs to be added/enhanced makes it easier for us to understand the need. ================================================ FILE: .github/ISSUE_TEMPLATE/3-documentation-improve.yml ================================================ name: "📝 Documentation Improvement" description: Report wrong or missing documentation. You can suggest a new document or a document that needs improvement. title: "📝 [Docs] - <title>" labels: [ "docs" ] body: - type: checkboxes attributes: label: dataverse version checks options: - label: > I have checked that the issue still exists on the latest versions of the _dataverse_. required: true - type: textarea id: location attributes: label: Location of the documentation description: > Please provide the location of the documentation. If you are suggesting a new document, please provide the appropriate place for it. validations: required: true - type: textarea id: problem attributes: label: Documentation problem description: > Please provide a description of what documentation you believe needs to be fixed/improved/added. validations: required: true - type: textarea id: suggestion attributes: label: Suggestion description: > Please explain the suggested fix and **why** it's better than the existing documentation. Or it could be the content of a new document you are suggesting. 
validations: required: true ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: true ================================================ FILE: .github/pull_request_template.md ================================================ ## PR Checklist Please check if your PR fulfills the following requirements: - [ ] The commit message follows _dataverse_ guidelines [link](https://github.com/UpstageAI/dataverse/blob/main/contribution/CONTRIBUTING.md#commit-guidelines): - [ ] Tests for the changes have been added (for bug fixes / features) - [ ] Docs have been added / updated (for bug fixes / features) ## What does this PR do? <!-- Please describe the link to a relevant issue and current behavior that you are modifying.--> - Issue Number: # - Description: ================================================ FILE: .gitignore ================================================ # forbidden .env reference/ common_crawl/ notebook/ .cache/ sample/ # open-source cc_net/ dps/ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ cover/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/build/ # PyBuilder .pybuilder/ target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control #poetry.lock # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. #pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. # https://pdm.fming.dev/#use-with-ide .pdm.toml # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # pytype static type analyzer .pytype/ # Cython debug symbols cython_debug/ # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ ================================================ FILE: .pre-commit-config.yaml ================================================ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v3.2.0 hooks: # - id: trailing-whitespace - id: check-added-large-files - id: detect-private-key - id: detect-aws-credentials args: [--allow-missing-credentials] - repo: https://github.com/pycqa/isort rev: 5.13.2 hooks: - id: isort args: [ --profile=black, ] - repo: https://github.com/psf/black rev: 23.12.1 hooks: - id: black args: [ --line-length=100, ] - repo: https://github.com/myint/autoflake rev: v2.2.0 hooks: - id: autoflake args: [ # --in-place, # --remove-unused-variables, # --remove-all-unused-imports, --expand-star-imports, ] - repo: https://github.com/PyCQA/flake8 rev: 6.0.0 hooks: - id: flake8 args: [ "--ignore=E203, E501, W503", ] # E203: Whitespace before ':' # E501: line length - because black checks and this makes error even on commented code # W503: PEP8 now recommends to break before binary operator (https://peps.python.org/pep-0008/#should-a-line-break-before-or-after-a-binary-operator) 
================================================ FILE: .readthedocs.yaml ================================================ # .readthedocs.yml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the OS, Python version and other tools you might need build: os: ubuntu-20.04 tools: python: "3.10" # You can also specify other tool versions: # nodejs: "19" # rust: "1.64" # golang: "1.19" # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/source/conf.py # Build documentation with MkDocs #mkdocs: # configuration: mkdocs.yml # Optionally build your docs in additional formats such as PDF #formats: # - pdf # Optionally set the version of Python and requirements required to build your docs python: install: - requirements: docs/source/requirements.txt ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: Makefile ================================================ .PHONY: aws_s3 pyspark java aws_s3: @test -d $$SPARK_HOME/jars || mkdir -p $$SPARK_HOME/jars @test -f $$SPARK_HOME/jars/hadoop-aws-3.3.4.jar || wget -P $$SPARK_HOME/jars/ https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/3.3.4/hadoop-aws-3.3.4.jar @test -f $$SPARK_HOME/jars/aws-java-sdk-bundle-1.12.592.jar || wget -P $$SPARK_HOME/jars/ https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-bundle/1.12.592/aws-java-sdk-bundle-1.12.592.jar pyspark: echo "export SPARK_HOME=$(shell pip show pyspark | grep Location | awk '{print $$2 "/pyspark"}')" >> ~/.bashrc echo "export PYSPARK_PYTHON=python3" >> ~/.bashrc # setting java environment java: sudo apt-get update sudo apt-get install openjdk-11-jdk echo "export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64" >> ~/.bashrc ================================================ FILE: README.md ================================================ <div align="center"> <br> <picture> <source media="(prefers-color-scheme: dark)" srcset="docs/images/dataverse_logo-white.png" width=300> <source media="(prefers-color-scheme: light)" srcset="docs/images/dataverse_logo-color.png" width=300> <img alt="DATAVERSE" src="docs/images/dataverse_logo-color.png" width=300> </picture> <br> The Universe of Data. All about Data, Data Science, and Data Engineering. 
</br> Upstage Solar is powered by Dataverse! Try at Upstage [Console](https://console.upstage.ai/)! [Docs](https://data-verse.gitbook.io/docs/) • [Examples](https://github.com/UpstageAI/dataverse/tree/main/examples) • [API Reference](https://data-verse.readthedocs.io/en/latest/) • [FAQ](https://data-verse.gitbook.io/docs/documents/faqs) • [Contribution Guide](https://github.com/UpstageAI/dataverse/blob/main/contribution/CONTRIBUTING.md) • [Contact](mailto:dataverse@upstage.ai) • [Discord](https://discord.gg/aAqF7pyq4h) • [Paper](https://arxiv.org/abs/2403.19340) <br><br> <div align="left"> ## Welcome to Dataverse! Dataverse is a freely-accessible open-source project that supports your **ETL(Extract, Transform and Load) pipeline with Python**. We offer a simple, standardized and user-friendly solution for data processing and management, catering to the needs of data scientists, analysts, and developers in the LLM era. Even if you don't know much about Spark, you can use it easily via _dataverse_. ### With Dataverse, you are empowered to - utilize a range of preprocessing functions without the need to install multiple libraries. - create high-quality data for analysis and training of Large Language Models (LLM). - leverage Spark with ease, regardless of your expertise level. - facilitate smoother collaboration among users with varying degrees of Spark proficiency. - enjoy freedom from the limitations of local environments by harnessing the capabilities of AWS EMR. ### Architecture of Dataverse ![Architecture of Dataverse](./docs/images/dataverse_system_architecture_white.jpeg) ### Key Features of Dataverse - **Block-Based**: In Dataverse, a `block` means a `registered ETL function` which is running on Spark. You can build Spark code like putting together puzzle pieces. You can easily add, take away, or re-arrange pieces to get the results you want via configure. - **Configure-Based**: All the setups for Spark and steps of block can be defined with configure. 
You don't need to know all the code. Just set up the options, and you're good to go. - **Extensible**: It's designed to meet your specific demands, allowing for custom features that fit perfectly with your project. If you want to know more about Dataverse, please check out our [docs](https://data-verse.gitbook.io/docs/). Clicking the image below will take you to a short intro video! [![Brief Introduction](./docs/images/dataverse_hero.png)](https://youtu.be/yYyyLuPNK5s?feature=shared) <br> ## 🌌 Installation ### 🌠 Prerequisites To use this library, the following conditions are needed: - Python (version between 3.10 and 3.11) - JDK (version 11) - PySpark A detailed installation guide for the prerequisites can be found [here](https://data-verse.gitbook.io/docs/installation). ### 🌠 Install via PyPi ```bash pip install dataverse ``` <br> ## 🌌 Quickstart Various and more detailed tutorials are [here](https://github.com/UpstageAI/dataverse/tree/main/examples). - [add_new_etl_process.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_04_add_new_etl_process.ipynb) : If you want to use your custom function, you have to register the function on Dataverse. This will guide you from registering the function to applying it in a pipeline. - [test_etl_process.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_05_test_etl_process.ipynb) : When you want to get test(sample) data to quickly test your ETL process, or need data from a certain point to test your ETL process. - [scaleout_with_EMR.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_06_scaleout_with_EMR.ipynb) : For people who want to run their pipeline on an EMR cluster. <details> <summary><u>Details of the example ETL configure.</u></summary> <ul></ul> <ul> <li style="line-height:250%;"> <b>data_ingestion___huggingface___hf2raw </b></li> Load dataset from <a href="https://huggingface.co/datasets/allenai/ai2_arc">Hugging Face</a>, which contains a total of 2.59k rows. 
</ul> <ul> <li style="line-height:250%;"> <b>utils___sampling___random </b></li> To decrease the dataset size, randomly subsample 50% of data to reduce the size of dataset, with a default seed value of 42. <br/> This will reduce the dataset to 1.29k rows. </ul> <ul> <li style="line-height:250%;"> <b>deduplication___minhash___lsh_jaccard </b></li> Deduplicate by <code>question</code> column, 5-gram minhash jaccard similarity threshold of 0.1. </ul> <ul> <li style="line-height:250%;"> <b>data_save___parquet___ufl2parquet </b></li> Save the processed dataset as a Parquet file to <code>./guideline/etl/sample/quickstart.parquet</code>.<br/> The final dataset comprises around 1.14k rows. </ul> </details> ```python # 1. Set your ETL process as config. from omegaconf import OmegaConf ETL_config = OmegaConf.create({ # Set up Spark 'spark': { 'appname': 'ETL', 'driver': {'memory': '4g'}, }, 'etl': [ { # Extract; You can use a HuggingFace dataset from the hub directly! 'name': 'data_ingestion___huggingface___hf2raw', 'args': {'name_or_path': ['ai2_arc', 'ARC-Challenge']} }, { # Reduce dataset scale 'name': 'utils___sampling___random', 'args': {'sample_n_or_frac': 0.5} }, { # Transform; deduplicate data via minhash 'name': 'deduplication___minhash___lsh_jaccard', 'args': {'threshold': 0.1, 'ngram_size': 5, 'subset': 'question'} }, { # Load; Save the data 'name': 'data_save___parquet___ufl2parquet', 'args': {'save_path': './guideline/etl/sample/quickstart.parquet'} } ] }) ``` The code block above is an example of an ETL process in Dataverse. In Dataverse, the available registered ETL functions are referred to as `blocks`, and this example is comprised of four blocks. You can freely combine these blocks using config to create the ETL processes for your needs. The list of available functions and their args can be found in the [API Reference](https://data-verse.readthedocs.io/en/latest/). Each function's 'args' should be added in dictionary format. ```python # 2. Run ETLpipeline. 
from dataverse.etl import ETLPipeline etl_pipeline = ETLPipeline() spark, dataset = etl_pipeline.run(config=ETL_config, verbose=True) ``` ETLPipeline is an object designed to manage the ETL processes. By inserting `ETL_config` which is defined in the previous step into ETLpipeline object and calling the `run` method, stacked ETL blocks will execute in the order they were stacked. ```python # 3. Result file is saved on the save_path ``` As the example gave `save_path` argument to the last block of `ETL_config`, data passed through the process will be saved on the given path. <br> ## 🌌 Modules Currently, about 50 functions are registered as the ETL process, which means they are eagerly awaiting your use! | Type | Package | description | |-----------|-----------------|---------------------------------------------------------------------------------------------------| | Extract | data_ingestion | Loading data from any source to the preferred format | | Transform | bias | (WIP) Reduce skewed or prejudiced data, particularly data that reinforce stereotypes. | | | cleaning | Remove irrelevant, redundant, or noisy information, such as stop words or special characters. | | | decontamination | (WIP) Remove contaminated data including benchmark. | | | deduplication | Remove duplicated data, targeting not only identical matches but also similar data. | | | pii | PII stands for Personally Identifiable Information. Removing sensitive information from data. | | | quality | Improving the data quality, in the perspective of accuracy, consistency, and reliability of data. | | | toxicity | (WIP) Removing harmful, offensive, or inappropriate content within the data. | | Load | data_save | Saving the processed data to a preferred source like data lake, database, etc. | | Utils | utils | Essential tools for data processing, including sampling, logging, statistics, etc. 
| <br> ## 🌌 Dataverse supports AWS Dataverse works with AWS S3 and EMR, enabling you to load and save data on S3 and execute ETL pipelines through EMR. Step by step guide to setting up is [here](https://data-verse.gitbook.io/docs/lets-start/aws-s3-support). </br> ## 🌌 Dataverse use-case > If you have any use-cases of your own, please feel free to let us know. </br>We would love to hear about them and possibly feature your case. *✨* [`Upstage`](https://www.upstage.ai/) is using Dataverse for preprocessing the data for the training of [Solar Mini](https://console.upstage.ai/services/solar?utm_source=upstage.ai&utm_medium=referral&utm_campaign=Main+hero+Solar+card&utm_term=Try+API+for+Free&utm_content=home). </br> *✨* [`Upstage`](https://www.upstage.ai/) is using Dataverse for preprocessing the data for the [Up 1T Token Club](https://en.content.upstage.ai/1tt). ## 🌌 Contributors <a href="https://github.com/UpstageAI/dataverse/graphs/contributors"> <img src="https://contrib.rocks/image?repo=UpstageAI/dataverse" /> </a> ## 🌌 Acknowledgements Dataverse is an open-source project orchestrated by the **Data-Centric LLM Team** at [`Upstage`](https://www.upstage.ai/), designed as an data ecosystem for LLM(Large Language Model). Launched in March 2024, this initiative stands at the forefront of advancing data handling in the realm of LLM. ## 🌌 License Dataverse is completely freely-accessible open-source and licensed under the Apache-2.0 license. ## 🌌 Citation If you want to cite our 🌌 Dataverse project, feel free to use the following bibtex. You can check our paper via [link](https://arxiv.org/abs/2403.19340). 
```bibtex @misc{park2024dataverse, title={Dataverse: Open-Source ETL (Extract, Transform, Load) Pipeline for Large Language Models}, author={Hyunbyung Park and Sukyung Lee and Gyoungjin Gim and Yungi Kim and Dahyun Kim and Chanjun Park}, year={2024}, eprint={2403.19340}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ================================================ FILE: contribution/CONTRIBUTING.md ================================================ # __Contribution Guidelines__ Welcome to _Dataverse_! We warmly welcome any kind of contribution 😊✨. </br> This page provides an outline on how to contribute to _Dataverse_ and suggestions for nice conventions to follow. > __These are guidelines, NOT rules 💡__ <p> This page is not the Constituion of the _Dataverse_. We are providing guidelines to help you make a useful and efficient contribution to _Dataverse_. While we think these guidelines are sensible and we appreciate when they are observed, following them isn't strictly required. We hope you won't be tired by these guidelines. Also, we'd love to hear your ideas on how to improve our guidelines! </br> # Table of Contents - [Questions or Feedback](#questions-or-feedback) - [🤝 How to Contribute?](#how-to-contribute) - [Tests](#tests) - [Directory of Dataverse](#directory-of-dataverse) - [Design Philosophy](#design-philosophy) - [Commit Guidelines](#commit-guidelines) - [Style Guides](#style-guides) </br> # Questions or Feedback Join the conversation on our GitHub discussion board! It's the go-to spot for questions, chats, and a helping hand from the _Dataverse_ community. Drop by and say hello here: [link](https://github.com/UpstageAI/dataverse/discussions) And if there's a shiny new feature you're dreaming of, don't be shy—head over to our [issue page](https://github.com/UpstageAI/dataverse/issues) to let us know! Your input could help shape the future. ✨ </br> # How to Contribute? 
- Any kind of improvement of document: fixing typo, enhancing grammar or semantic structuring or adding new examples. - Submit issues related to bugs, new desired features, or enhancement of existing features. - Fix a bug, implement new feature or improving existing feature. - Answer other users' question or help. ## __Documentation__ We appreciate all the pull requests to fix typo / improve grammar or semantic structuring of documents. Feel free to check! <br/> Our API reference page is constructed with [Sphinx](https://www.sphinx-doc.org/en/master/). We adhere to the [Google style for docstrings](https://google.github.io/styleguide/pyguide.html) as a fundamental practice, so please follow this format. The source files are located within the `docs/source/` directory. ## __Report a Bug / Request New Feature / Suggest Enhancements__ Please open an issue whenever you find a bug or have an idea to enhance _Dataverse_. Maintainers will label it or leave comment on it as soon as they check the issue. Issues labeled as `Open for contribution` mean they are open for contribution. ## __Fix a Bug / Add New Feature / Improve Existing Feature__ If you have a particular roadmap, goals, or new feature, share it via issue. When you already fixed a bug or have new feature that enhances _Dataverse_, you can jump on to fourth step which is opening pull requests. Please note that when you open pull requests without opening an issue or maintainers' check, it can be declined if it does not aligh with philosophy of _Dataverse_. ### __1️⃣ Check issues labeled as__ `Open for contribution` You can find issues waiting for your contribution by filtering label with `Open for contribution`. This label does not stand alone. It is always with `Bug`, `Docs` or `Enhancement`. Issues with `Critical` or `ASAP` label are more urgent. 
### __2️⃣ Leave a comment on the issue you want to contribute__ Once we review your comment, we'll entrust the issue to you by swapping out the `Open for contribution` label for a `WIP` (Work in Progress) label. ### __3️⃣ Work on it__ Before diving into coding, do take a moment to familiarize yourself with our coding style by visiting this [style guides](#style-guides). And hey, if you hit a snag while tackling the issue, don't hesitate to drop a comment right there. Our community is a supportive bunch and will jump in to assist or brainstorm with you. 1. Fork the repository of _Dataverse_. 2. Clone your fork to your local disk. 3. Create a new branch to hold your develompment changes. </br> It's not required to adhere strictly to the branch naming example provided; consider it a mild suggestion. ``` git checkout -b {prefix}/{issue-number}-{description} ``` 4. Set up a development environment 5. Develop the features in your branch ### __4️⃣ Create a Pull Request__ Go ahead and visit your GitHub fork, then initiate a pull request — it's time to share your awesome work! Before you do, double-check that you've completed everything on the checklist we provided. Once you're all set, submit your contributions for the project maintainers to review. Don't worry if the maintainers have some feedback or suggest changes—it's all part of the process and happens to even our most experienced contributors. Keep your updates flowing by working in your local branch and pushing any new changes to your fork. Your pull request will update automatically for everyone to see the progress. </br> # Tests The Dataverse test framework is built using [pytest](https://docs.pytest.org/en/8.0.x/). Ensure that you write a corresponding test for any new features or changes you make. You'll find the test files in the `dataverse/dataverse/tests` directory. - Create a new test file if you've introduced a new category or a sub-category for the ETL process. 
- If your addition is a new feature within an existing category or sub-category, include your tests in the existing test file. </br> # Directory of Dataverse For _Dataverse_'s overarching goals: check the [docs](https://data-verse.gitbook.io/docs#future-work) ```{plain text} 📦 dataverse/dataverse ┣ 📂 api ┣ 📂 config ┃ ┣ 📂 etl ┃ ┃ ┗ 📂 sample ┣ 📂 etl ┃ ┣ 📂 {CATEGORY} ┣ 📂 lab ┣ 📂 tests ┗ 📂 utils ``` - [`📂 api`](https://github.com/UpstageAI/dataverse/tree/main/dataverse/api): The Dataverse API serves as a gateway for users. - [`📂 config`](https://github.com/UpstageAI/dataverse/tree/main/dataverse/config): Contains configuration files for the Dataverse application. You can also find sample configuration file for etl process under this directory. - [`📂 etl`](https://github.com/UpstageAI/dataverse/tree/main/dataverse/etl): Main directory of _Dataverse_ where all of the data processors are placed. Data processors are separated with it's category. - [`📂 lab`](https://github.com/UpstageAI/dataverse/tree/main/dataverse/lab): TBD. Data analysis will be supported via here. - [`📂 tests`](https://github.com/UpstageAI/dataverse/tree/main/dataverse/tests): Pytest files - [`📂 utils`](https://github.com/UpstageAI/dataverse/tree/main/dataverse/utils): The Utilities module functions as a collection of internal helper tools. Its key features include API utilities that simplify interaction with various external APIs, including AWS EMR. Please be aware that another utils module is also included within the etl module. </br> # Design Philosophy - [Principles for Configuration](#principles-for-configuration) - [Principles for ETL Process](#principles-for-etl-process) ## Principles for Configuration 1. `One file` rules `ALL` 2. `10 Seconds` to know what is going on #### 1. `One file` rules `ALL` One cycle of ETL, Analyzer, etc. which we could call one job, will be controled by one configuration file. We are not going to use multiple configuration files to composite one big configuration file. 
#### 2. `10 Seconds` to know what is going on The reader should be able to know what is going on in the configuration file within 10 seconds. This is to make sure the configuration file is easy and small enough to read and understand. ## Principles for ETL Process > When you create your own ETL process, you should follow the following principles 1. No `DRY` (Don't Repeat Yourself) 2. One file Only #### 1. No `DRY` (Don't Repeat Yourself) > No `DRY` is applied between **ETL sub-categories**. - So if similar ETL processes are used in same sub-categories, it could be shared. - But if it's used in different sub-categories, it should not be shared. As you can see in the following example, there are 2 ETL processes `common_process_a` and `common_process_b`seems nice to be shared. But as you can see, they are not shared. They are repeated. This is because of the No `DRY` principle. ```python - deduplication/ - exact.py - "def common_process_a():" - "def common_process_b():" - def deduplication___exact___a(): - exact_datasketch.py - "def common_process_a():" - "def common_process_b():" - def deduplication___exact_datasketch___a(): - def deduplication___exact_datasketch___b(): ``` #### 2. One file Only Code that ETL process uses should be in the same file. This is because of the `One file Only` principle. Except **ETL Base class, few required utils functions, and open sources** there should be no dependency outside the file. 
```python # This is OK ✅ - deduplication/ - exact.py - def helper_a(): - def helper_b(): - def etl_process(): helper_a() helper_b() # This is not allowed ❌ - deduplication/ - helper.py - def helper_a(): - def helper_b(): - exact.py from helper import helper_a from helper import helper_b - def etl_process(): helper_a() helper_b() ``` ETL process itself is meant to be built to be used in various combination of ETL pipeline **So try to make it as generic as possible.** </br> # Commit Guidelines ### Commit strategy - Avoid mixing multiple, unrelated modifications in a single commit. One commit is related with one issue. - Each commit should encapsulate a complete, autonomous upgrade to the code. ### Commit messages Please make sure your commit messages follow `type`: `title (#<related issue number>)` format. <br/> For example: ```plain text <TYPE>: Short summary with 72 characters or less (#<Issue number>) If you have more detalied explanatory text, put it as body. But the body is optional. ``` - Find adequate type in the below list: - `NEW`: introducing a new feature - `ENHANCE`: improve an existing code/feature. - `FIX`: fix a code bug - `DOCS`: write/update/add any kind of documents including docstring - `REFACTOR`: refactor existing code without any specific improvements - `STYLE`: changes that do not affect the meaning of the code (ex. white-space, line length) - `TEST`: add additional testing - `DEL`: remove code or files - `RELEASE`: release new version of dataverse - `OTHER`: anything not covered above (not recommended) - Use the present tense ("Add feature" not "Added feature") - Do not end the subject line with a punctuation </br> # Style Guides ### Pre-commit hook We provide a pre-commit git hook for style check. You can find exact check list in this [file](https://github.com/UpstageAI/dataverse/blob/main/.pre-commit-config.yaml). 
<br/> Please run the code below before a commit is created: ```bash pre-commit run ``` ================================================ FILE: dataverse/README.md ================================================ # Dataverse > The Universe of Data ## 🌌 Config > Config for the Dataverse ## 🌌 API > Interface of Dataverse for external use ## 🌌 ETL > ETL pipeline (Extract, Transform, Load) ## 🌌 LAB > Data Analysis & Visualization ## 🌌 Utils > Common utilities used internally for Dataverse ================================================ FILE: dataverse/__init__.py ================================================ ================================================ FILE: dataverse/api/README.md ================================================ # API (Application Programming Interface) > Interface with ease and efficiency ================================================ FILE: dataverse/api/__init__.py ================================================ ================================================ FILE: dataverse/api/cli.py ================================================ """ main entry point for the dataverse CLI tool """ from dataverse.utils.setting import SystemSetting def main(): """Main entry point for the cli.""" print("🌌 Hello Welcome to Dataverse! 🌌") print("=" * 50) print("We are still under construction for CLI!") print("=" * 50) print("QUARK - By Ducky 🦆") # set the system setting to CLI mode SystemSetting().IS_CLI = True ================================================ FILE: dataverse/api/emr.py ================================================ """ API to use AWS EMR with spark-submit """ import os import argparse import importlib from dataverse.etl import ETLPipeline def import_dynamic_etls(): """ Import dynamic etls which was created by user. 
""" dynamic_etl_path = "/home/hadoop/dataverse/dynamic_etl" try: files = os.listdir(dynamic_etl_path) except FileNotFoundError: return except Exception as e: raise e # Filter out non-Python files files = [f for f in files if f.endswith('.py')] # Dynamically import all Python files in the directory for file in files: file_path = os.path.join(dynamic_etl_path, file) # Remove .py at the end module_name = file[:-3] spec = importlib.util.spec_from_file_location(module_name, file_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) def main(config, verbose=False): """Main entry point for the aws emr.""" etl_pipeline = ETLPipeline() import_dynamic_etls() spark, data = etl_pipeline.run(config=config, verbose=verbose) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config", help="config file path") parser.add_argument("--verbose", action='store_true') args = parser.parse_args() main(args.config, args.verbose) ================================================ FILE: dataverse/config/README.md ================================================ # Configuration > This directory contains configuration files for the Dataverse application ## 🌌 How to use ### 🌠 Load pre-built configuration > you can load the pre-built configuration from path, or dict, or OmegaConf #### Load from local path ```python from dataverse.config import Config config = Config.load('path/to/config.yaml') ``` #### Load from AWS S3 > you need to set aws credential with `aws configure` to use this feature ```python from dataverse.config import Config config = Config.load('s3://path/to/config.yaml') ``` #### Load from dict ```python config = Config.load({ "spark": {"appname": "README.md example"} "etl": [ {"name": "...", "args": "..."}, {"name": "...", "args": "..."}, ] }) ``` ### 🌠 Set the empty args with `default` value > the args you already set will not be changed to default ```python from dataverse.config import Config config = 
Config.load('path/to/config.yaml') config = Config.set_default(config) ``` ### 🌠 Get `Default` configuration > `default` configuration has no `etl` pre-defined ```python from dataverse.config import Config config = Config.default() ``` ## 🌌 About Configuration ### 🌠 Why configuration is just `OmegaConf`? > To make it simple and easy to use. We are not going to inherit some other `base` class to make it complicated. But still `Config` interface is provided as a helper for to load, save, set default, etc. ### 🌠 2 Rules for configuration 1. `One file` rules `ALL` 2. `10 Seconds` to know what is going on #### `One file` rules `ALL` One cycle of ETL, Analyzer, etc. which we could call one job, will be controled by one configuration file. We are not going to use multiple configuration files to composite one big configuration file. #### `10 Seconds` to know what is going on The reader should be able to know what is going on in the configuration file within 10 seconds. This is to make sure the configuration file is easy and small enough to read and understand. ### 🌠 What open source to choose for configuration? > **`omegaconf`** - `OmegaConf` - For ease understanding & usage - Omegaconf supports yaml, dict, json and even `dataclass` from python. - `hydra` - hydra was also our candidate but to make it simple we are using OmegaConf. 
class Config:
    """
    Interface to check & load the configurations

    This class provides a lightweight wrapper for OmegaConf and allows checking
    and loading configurations. It supports loading configurations from various
    sources such as files, AWS S3, and config strings. The class also provides
    methods for saving configurations and setting default values for missing
    config arguments.
    """

    def __new__(cls, *args, **kwargs):
        # Config is a namespace of classmethods only; instantiation is banned.
        raise NotImplementedError("Config is not allowed to be instantiated")

    @classmethod
    def load(cls, config: Union[str, dict, DictConfig, OmegaConf, Path]):
        """
        Load the configuration for the etl.

        Args:
            config (Union[str, dict, OmegaConf]): The configuration for the etl.
                - str or Path: This could have several cases:
                    - Path to the config file.
                    - S3 path to the config file.
                    - Config string. This is similar to loading a `yaml` file
                      with `open()`.
                - dict: Config dictionary.
                - OmegaConf: Config object.

        Returns:
            The loaded configuration.

        Raises:
            ValueError: If the provided config is not a valid path or S3 path.
            TypeError: If the provided config is not of type str, dict, or OmegaConf.
        """
        if isinstance(config, (str, Path)):
            # Normalize Path objects so the string checks below apply.
            config = str(config)

            # Local file
            if Path(config).is_file():
                config = OmegaConf.load(config)

            # AWS S3
            elif config.startswith(('s3://', 's3a://', 's3n://')):
                # NOTE: `[an]?` matches s3/s3a/s3n; the previous `[a,n]?`
                # also accepted a literal comma (e.g. "s3,//").
                aws_s3_matched = re.match(r's3[an]?://([^/]+)/(.*)', config)
                if aws_s3_matched:
                    bucket, key = aws_s3_matched.groups()
                    config_content = aws_s3_read(bucket, key)
                    config = OmegaConf.create(config_content)
                else:
                    # Assume it's a config string that starts with s3
                    config_str = config
                    config = OmegaConf.create(config_str)

                    # Check if it's a config string or not
                    # In case of a config string, it should create a config object
                    # If not, it will create {'config': None}
                    if config_str in config and config[config_str] is None:
                        raise ValueError(f"config {config_str} is not a valid s3 path")

            # String Config
            else:
                # Assume it's a config string
                config_str = config
                config = OmegaConf.create(config_str)

                # Same as above, check if it's a config string or not
                if config_str in config and config[config_str] is None:
                    raise ValueError(f"config {config_str} is not a valid path")
        elif isinstance(config, dict):
            config = OmegaConf.create(config)
        elif isinstance(config, (OmegaConf, DictConfig)):
            pass
        else:
            raise TypeError(f"config should be str, dict, or OmegaConf but got {type(config)}")

        return config

    @classmethod
    def save(cls, config, path: Union[str, Path]):
        """
        Saves the configuration to a specified path.

        Args:
            config: The configuration to be saved.
            path (Union[str, Path]): The path where the configuration should be
                saved. Local paths and s3/s3a/s3n URIs are supported.

        Raises:
            ValueError: If the provided path is not a valid S3 path.
        """
        # Normalize to str first: the documented Union[str, Path] contract was
        # previously broken because `Path` has no `startswith` method.
        path = str(path)

        if path.startswith(('s3://', 's3a://', 's3n://')):
            aws_s3_matched = re.match(r's3[an]?://([^/]+)/(.*)', path)
            if aws_s3_matched:
                bucket, key = aws_s3_matched.groups()
                aws_s3_write(bucket, key, config)
            else:
                raise ValueError(f"config path {path} is not a valid s3 path")
        else:
            OmegaConf.save(config, Path(path))

    @classmethod
    def default(cls, emr: bool = False):
        """
        Fill the missing config with default values.

        Args:
            emr (bool, optional): Flag indicating whether the config is for EMR.
                Defaults to False.

        Returns:
            dict: Default configuration dictionary.
        """
        local_dir = f"{SystemSetting().CACHE_DIR}/.cache/dataverse/tmp"
        default = OmegaConf.create({
            'spark': {
                'master': 'local[10]',
                'appname': 'default',
                'driver': {
                    'memory': '8G',
                    'maxResultSize': '2G',
                },
                'executor': {'memory': '1G'},
                'local': {'dir': local_dir},
                'ui': {'port': 4040},
            },
            'etl': [],
        })
        if emr:
            default.update({
                'emr': {
                    'id': None,
                    'working_dir': None,
                    'name': 'dataverse_emr',
                    'release': 'emr-6.15.0',
                    'idle_timeout': 3600,

                    # master (driver)
                    'master_instance': {
                        'type': None,
                    },

                    # core (data node)
                    'core_instance': {
                        'type': None,
                        'count': 2,
                    },

                    # task (executors)
                    'task_instance': {
                        'type': None,
                        'count': 0,
                    },

                    # EMR cluster created by dataverse or user
                    'auto_generated': None,

                    # iam
                    'role': {
                        'ec2': {
                            'name': None,
                            'policy_arns': None,
                        },
                        'emr': {
                            'name': None,
                            'policy_arns': None,
                        }
                    },
                    'instance_profile': {
                        'name': None,
                        'ec2_role': None,
                    },

                    # TODO: allow more options to customize e.g. cidr, tag, etc.
                    # but make sure vpc is temporary and not shared
                    'vpc': {
                        'id': None,
                    },
                    'subnet': {
                        'id': None,
                        'public_id': None,
                        'private_id': None,
                        'public': True,
                    },
                    'security_group': {
                        'id': None,
                    },
                    'gateway': {
                        'id': None,
                    },
                    'route_table': {
                        'id': None,
                    },
                    'elastic_ip': {
                        'id': None,
                    },
                    'nat_gateway': {
                        'id': None,
                    },
                }
            })

        return default

    @classmethod
    def set_default(cls, config: OmegaConf, emr: bool = False):
        """
        Sets the missing config arguments with default values.

        Args:
            config (OmegaConf): The configuration object to merge with default values.
            emr (bool, optional): Whether to use EMR configuration. Defaults to False.

        Returns:
            OmegaConf: The merged configuration object.
        """
        # User-provided values win over defaults (later configs override earlier).
        return OmegaConf.merge(cls.default(emr=emr), config)
Whatever ETL you make, you need to register it to registry. ### 🌠 Choose what `Category` & `Sub-Category` to put your ETL process > First you need to check the category and sub-category of the ETL process you want to add. ```python ====================================== - etl/ - CATEGORY/ - __init__.py - SUBCATEGORY.py - def CATEGORY___SUBCATEGORY___ETL_PROCESS() ====================================== ``` - `category` is the folder. This is pre-defined and you can add a new category if needed. **Check below to learn more about category** - `sub-category` is the python file. This is not pre-defined and you have to decide which name could be appropriate for the ETL process you want to add. Now when you know the `category` and `sub-category`, you can add a new ETL process. There are only one way to add a new ETL process ### 🌠 Use decorator `@register_etl` to register your ETL `function` ```python # check the __sample/ folder for example from dataverse.etl import register_etl @register_etl def category___subcategory___etl(rdd, config): # do something return rdd ``` #### ☣️ Inheriting `BaseETL` is NOT ALLOWED ☣️ ```python from dataverse.etl import BaseETL class category___subcategory___etl(BaseETL): def run(rdd, config): # do something return rdd ``` ### 🌠 ETL Process Class Naming Convention > This shared the same documentary with README.md in `__sample/` folder <details> ```python [ETL Category]___[ETL Sub-Category]___[ETL Name] ====================================== - "__sample/" - github.py - def __sample___github___remove_url() - def __sample___github___filter_by_stars() - "bias/" - mmlu.py - def bias___mmlu___remove_word() - def bias___mmlu___to_parquet() - ducky.py - def bias___ducky___fly() - def bias___ducky___quark() ====================================== ``` > caveat: the combination of `[ETL Category]___[ETL Sub-Category]___[ETL Name]` MUST be unique 1. 
`[ETL Category]` is the folder and category where the ETL is defined - `[ETL Category]` MUST be one of the following pre-defined list - `cleaning` - `decontamination` - `deduplication` - `data_ingestion` - `pil` - `quality` - `toxicity` - `bias` - `data_load` - `utils` 2. `[ETL Sub-Category]` is the name of the file where the ETL is defined - no pre-defined list - it could be a dataset name - or a nickname of yours - or whatever you think it's appropriate - e.g. `github` or `kaggle` or `mmlu` whatever you want 3. `[ETL Name]` naming should follow `function` naming convention, even it's `class` - all lower case - use underscore `_` to separate words 4. Each is separated by `___` (triple underscore) - e.g. `bias___mmlu___remove_word()` #### Why does folder, file name included in the ETL class name? - To avoid the following tmp names on dynamic construction of ETL class - e.g. `tmp___ipykernel_181248___remove_url` <- jupyter notebook env - e.g. `python3.10___abc___remove_url` <- dynamic class construction by `type` - so decided to control the name space by only `ETL class name` which includes folder, file name </details> ## 🌌 Principles for ETL Process > When you create your own ETL process, you should follow the following principles 1. No `DRY` (Don't Repeat Yourself) 2. One file Only ### 🌠 No `DRY` (Don't Repeat Yourself) > No `DRY` is applied between **ETL sub-categories**. - So if similar ETL processes are used in same sub-categories, it could be shared. - But if it's used in different sub-categories, it should not be shared. As you can see in the following example, there are 2 ETL processes `common_process_a` and `common_process_b`seems nice to be shared. But as you can see, they are not shared. They are repeated. This is because of the No `DRY` principle. 
```python - deduplication/ - exact.py - "def common_process_a():" - "def common_process_b():" - def deduplication___exact___a(): - exact_datasketch.py - "def common_process_a():" - "def common_process_b():" - def deduplication___exact_datasketch___a(): - def deduplication___exact_datasketch___b(): ``` ### 🌠 One file Only Code that ETL process uses should be in the same file. This is because of the `One file Only` principle. Except **ETL Base class, few required utils functions, and open sources** there should be no dependency outside the file. ```python # This is OK ✅ - deduplication/ - exact.py - def helper_a(): - def helper_b(): - def etl_process(): helper_a() helper_b() # This is not allowed ❌ - deduplication/ - helper.py - def helper_a(): - def helper_b(): - exact.py from helper import helper_a from helper import helper_b - def etl_process(): helper_a() helper_b() ``` ETL process itself is meant to be built to be used in various combination of ETL pipeline **So try to make it as generic as possible.** 😊 ## 🌌 How to use ETL Process by Configuration > Now let's learn how to use ETL process by configuration ### 🌠 Register ETL process > This is same as above. 
Register ETL process using `@register_etl` decorator ```python from dataverse.etl import register_etl @register_etl def etl_process_start(spark, load_path, repartition=3): data = spark.read.load(load_path).repartition(repartition) return data @register_etl def etl_process_middle(data, threshold=0.5): data = data.filter(data['stars'] > threshold) return data @register_etl def etl_process_end(data, save_path, repartition=1): data.repartition(repartition).write.save(save_path) return None ``` ### 🌠 Define ETL process in the config file You can use the following config to run the above ETL processes in order - `etl_process_start` -> `etl_process_middle` -> `etl_process_end` ```yaml spark: appname: dataverse_etl_sample driver: memory: 4g etl: - name: etl_process_start args: load_path: ./sample/raw.parquet repartition: 3 - name: etl_process_middle args: threshold: 0.5 - name: etl_process_end args: save_path: ./sample/ufl.parquet repartition: 1 ``` **Check the following real example for more details** - Config located at `dataverse/config/etl/sample/ETL___one_cycle.yaml` ```yaml spark: appname: dataverse_etl_sample driver: memory: 16g etl: - name: data_ingestion___test___generate_fake_ufl - name: utils___sampling___random args: sample_n_or_frac: 0.1 - name: deduplication___minhash___lsh_jaccard - name: data_load___huggingface___ufl2hf_obj ``` ## 🌌 How to add a new ETL Category ### 🌠 Add a new folder to `etl/` folder ```python ====================================== - etl/ - YOUR_NEW_CATEGORY/ - __init__.py - YOUR_NEW_SUBCATEGORY.py - data_ingestion/ ... 
======================================
```

### 🌠 Add a new category to `ETL_CATEGORIES` in `registry.py`
> Only added categories will be recognized by the ETL pipeline

```python
ETL_CATEGORIES = [
    YOUR_NEW_CATEGORY,
    'data_ingestion',
    'decontamination',
    'deduplication',
    'bias',
    'toxicity',
    'cleaning',
    'pii',
    'quality',
    'data_load',
    'utils',
]
```

### 🌠 Pre-defined ETL Categories

```python
======================================
- etl/
    - "__sample/"
        - This is to show how to use the etl package
    - "data_ingestion/"
        - converting data from one format, schema to another
    - "data_load/"
        - saving data to desired location
    - "quality/"
        - improving data quality
        - e.g. removing data with low quality
    - "cleaning/"
        - cleaning data
        - e.g. removing HTML tags from text
        - e.g. data normalization
    - "decontamination/"
        - removing contamination from data
        - e.g. removing benchmark data from data
    - "deduplication/"
        - removing duplication inside data
    - "pii/"
        - removing PII from data
    - "bias/"
        - removing bias from data
        - e.g. removing data with gender bias words
    - "toxicity/"
        - removing toxic data
        - e.g. removing data with toxic words
    - "utils/"
        - utilities for the ETL process
        - e.g.
sampling, logging, error handling, etc ====================================== ``` ## 🌌 How to Ignore specific ETL Sub-Category > If you want to ignore some of the `ETL sub-category` python files, you can add the file name to `ETL_IGNORE` in `registry.py` when you want to make a file just for storage purpose, you can add the file name to `ETL_IGNORE` in `registry.py` ```python ETL_IGNORE = [ '__init__.py', 'storage.py' ] ``` ================================================ FILE: dataverse/etl/__init__.py ================================================ from .registry import ETLRegistry from .registry import register_etl from .registry import BaseETL from .pipeline import ETLPipeline ================================================ FILE: dataverse/etl/__sample/README.md ================================================ # Sample > This is a showcase ================================================ FILE: dataverse/etl/__sample/__init__.py ================================================ ================================================ FILE: dataverse/etl/__sample/ducky.py ================================================ from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl import register_etl from typing import Union @register_etl def __sample___ducky___make_your_own_etl_processor(data: Union[RDD, DataFrame], *args, **kwargs): """ decorator will convert this function to BaseETL class """ print("make_your_own_etl_processor") return data ================================================ FILE: dataverse/etl/__sample/github.py ================================================ from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl import BaseETL from dataverse.etl import register_etl from dataverse.etl import ETLRegistry from dataverse.etl.registry import ETLStructure from typing import Union @register_etl def __sample___github___using_decorator(data: Union[RDD, DataFrame], *args, **kwargs): """ decorator will convert this 
function to BaseETL class """ print("sample using decorator") return data @register_etl def __sample___github___config(data: Union[RDD, DataFrame], config: dict = None, *args, **kwargs): """ decorator will convert this function to BaseETL class """ print("config says", config) return data if __name__ == "__main__": registry = ETLRegistry() print("[ Testing ] registry etl using decorator") # this could seem like a function but it is actually a BaseETL class etl = __sample___github___using_decorator etl()(data=None) print("is subclass of ETLStructure?", issubclass(etl, ETLStructure), "\n") print("[ Testing ] registry etl using decorator with config") etl = __sample___github___config etl()(data=None, config={"hello": "world"}) print("is subclass of ETLStructure?", issubclass(etl, ETLStructure), "\n") # check is it properly registryed print("[ Testing ] check is it properly registry") print("="*50) print(registry._registry) print("="*50) ================================================ FILE: dataverse/etl/bias/README.md ================================================ ================================================ FILE: dataverse/etl/bias/__init__.py ================================================ ================================================ FILE: dataverse/etl/cleaning/README.md ================================================ # Cleaning > Data normalization, removing noise, and other data cleaning tasks. ## 🌌 Naming Convention > This is a strong recommendation. You can use your own naming convention if you want. ```python def cleaning___[ETL Sub-Category]___[ETL Process]() ``` - `ETL Sub-Category` - the data source to handle - e.g. unicode - e.g. char - e.g. word - e.g. number - `ETL process name` - purpose of the ETL process - e.g. remove - e.g. filter - e.g. 
normalize ================================================ FILE: dataverse/etl/cleaning/__init__.py ================================================ ================================================ FILE: dataverse/etl/cleaning/char.py ================================================ """ A collection of modules for cleaning data at the character level. For example: whitespace, accent characters, and unprintable characters. Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ import re import unicodedata from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl.registry import register_etl @register_etl def cleaning___char___normalize_whitespace( spark, data: Union[RDD, DataFrame], subset: str = "text", *args, **kwargs ) -> RDD: r""" Normalize whitespace. - Strips the leading and trailing whitespaces. - Replaces all consecutive whitespaces with a single space, excluding ``\n`` and ``\r`` characters. Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. subset (str): A subset or column to consider. Defaults to 'text'. Returns: RDD: The processed data with normalized whitespace. """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) pattern = re.compile(r"[^\S\r\n]+") def _normalize_whitespace(row): row[subset] = re.sub(pattern, " ", row[subset].strip()) return row data = data.map(_normalize_whitespace) return data @register_etl def cleaning___char___remove_unprintable( spark, data: Union[RDD, DataFrame], subset="text", *args, **kwargs ) -> RDD: """ Remove all the non-printable characters. Code is from facebookresearch/cc_net https://github.com/facebookresearch/cc_net/blob/main/cc_net/text_normalizer.py Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. subset (str): A subset or column to consider. Defaults to 'text'. 
Returns: RDD: The processed data with unprintable characters are removed. """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _remove_non_printable_char(row): new_lines = [] for line in row[subset].split("\n"): new_lines.append( re.sub(f"[{''.join(map(chr, list(range(0,32)) + list(range(127,160))))}]", "", line) ) row[subset] = "\n".join(new_lines) return row data = data.map(_remove_non_printable_char) return data def strip_accents(text: str) -> str: """Strips accents from a piece of text.""" nfd = unicodedata.normalize("NFD", text) output = [c for c in nfd if unicodedata.category(c) != "Mn"] if len(output) == text: return text return "".join(output) @register_etl def cleaning___char___remove_accent( spark, data: Union[RDD, DataFrame], subset: str = "text", *args, **kwargs ) -> RDD: """Strips accents from a piece of text. +--------+--------+ | input | output | +========+========+ | café | cafe | | résumé | resume | +--------+--------+ Code is from facebookresearch/cc_net https://github.com/facebookresearch/cc_net/blob/main/cc_net/text_normalizer.py Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. subset (str): A subset or column to consider. Defaults to 'text'. Returns: The processed data with accents removed. """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _strip_accents(row): row[subset] = strip_accents(row[subset]) return row data = data.map(_strip_accents) return data ================================================ FILE: dataverse/etl/cleaning/document.py ================================================ """ A collection of modules for cleaning data at the document level. Copyright (c) 2024-present Upstage Co., Ltd. 
Apache-2.0 license """ from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl.registry import register_etl @register_etl def cleaning___document___split_by_word( spark, data: Union[RDD, DataFrame], subset: str = "text", word_per_chunk: int = 100, delimiter: str = " ", *args, **kwargs ) -> RDD: """ Split documents into smaller chunks by word. Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. subset (str, optional): A subset or column to consider. Defaults to 'text'. word_per_chunk (int, optional): Number of words per chunk. Defaults to 100. delimiter (str, optional): Delimiter to split the text. Defaults to " ". Returns: RDD: The processed data with documents split into smaller chunks. Raises: ValueError: If word_per_chunk is not a positive integer. Examples: - word_per_chunk = 2 - delimiter = " " - input +-----------------------------+ | text | +=============================+ | "hello world, how are you?" | +-----------------------------+ - output +----------------+ | text | +================+ | "hello world," | +----------------+ | "how are" | +----------------+ | "you?" | +----------------+ Caveats: - NO normalization is done here! - This doesn't consider the whitespace normalization. - Recommend using other normalization before this. - All the keys from the original row are copied to all the new rows created. - ``id`` is not unique anymore. - Make sure ``id`` is assigned after this step. 
""" if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _split_by_word(row): words = row[subset].split(delimiter) # Create chunks chunks = [] for i in range(0, len(words), word_per_chunk): chunks.append(delimiter.join(words[i : i + word_per_chunk])) # Create a new dictionary for each chunk with all the keys from the original row return [{**row, subset: chunk} for chunk in chunks] data = data.flatMap(_split_by_word) return data ================================================ FILE: dataverse/etl/cleaning/html.py ================================================ """ A collection of modules for cleaning data includes html. Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ from typing import Union import html2text from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl.registry import register_etl @register_etl def cleaning___html___extract_plain_text( spark, data: Union[RDD, DataFrame], subset: str = "text", use_trafilatura: bool = False, *args, **kwargs ) -> RDD: r""" Extracts plain text from HTML. Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. subset (str, optional): A subset or column to consider. Defaults to 'text'. use_trafilatura (bool, optional): Whether to use trafilatura instead of html2text. Defaults to False. Returns: The plain data extracted from html. Caveats: - ``html2text`` adds a double newline after each paragraph, which is not handled at this point. - The option to use `trafilatura` is provided because extracting plain text with ``trafilatura`` does not seem to work well in some cases. - [OK] Case:: text = "<body><h1>My First Heading</h1><p>My first paragraph.</p></body>" # html2text print(html2text.html2text(text)) >>> '# My First Heading\n\nMy first paragraph.\n\n' # trafilatura print(trafilatura.html2txt(text)) >>> 'My First HeadingMy first paragraph.' 
- [ERROR] Case (trafilatura removes all the text):: text = "<p>hello <br> nice to meet you.</p>" # html2text print(html2text.html2text(text)) >>> 'hello \nnice to meet you.\n\n' # trafilatura print(trafilatura.html2txt(text)) >>> '' """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) # this is optional if use_trafilatura: import trafilatura def _html2txt(row): row[subset] = trafilatura.html2txt(row[subset]) return row else: def _html2txt(row): row[subset] = html2text.html2text(row[subset]) return row data = data.map(_html2txt) return data ================================================ FILE: dataverse/etl/cleaning/korean.py ================================================ """ This is only for Korean text datas. Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ import re from enum import IntEnum from typing import List, Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl.registry import register_etl class KoreanType(IntEnum): JAUM = 0 MOUM = 1 COMPLETE = 2 ELSE = -1 KOR_BEGIN = 44032 KOR_END = 55203 CHOSUNG_BASE = 588 JUNGSUNG_BASE = 28 JAUM_BEGIN = 12593 JAUM_END = 12622 MOUM_BEGIN = 12623 MOUM_END = 12643 # fmt: off CHOSUNG = ["ㄱ", "ㄲ", "ㄴ", "ㄷ", "ㄸ", "ㄹ", "ㅁ", "ㅂ", "ㅃ", "ㅅ", "ㅆ", "ㅇ", "ㅈ", "ㅉ", "ㅊ", "ㅋ", "ㅌ", "ㅍ", "ㅎ"] JUNGSUNG = ["ㅏ", "ㅐ", "ㅑ", "ㅒ", "ㅓ", "ㅔ", "ㅕ", "ㅖ", "ㅗ", "ㅘ", "ㅙ", "ㅚ", "ㅛ", "ㅜ", "ㅝ", "ㅞ", "ㅟ", "ㅠ", "ㅡ", "ㅢ", "ㅣ"] JONGSUNG = [" ", "ㄱ", "ㄲ", "ㄳ", "ㄴ", "ㄵ", "ㄶ", "ㄷ", "ㄹ", "ㄺ", "ㄻ", "ㄼ", "ㄽ", "ㄾ", "ㄿ", "ㅀ", "ㅁ", "ㅂ", "ㅄ", "ㅅ", "ㅆ", "ㅇ", "ㅈ", "ㅊ", "ㅋ", "ㅌ", "ㅍ", "ㅎ"] JAUM = ["ㄱ", "ㄲ", "ㄳ", "ㄴ", "ㄵ", "ㄶ", "ㄷ", "ㄸ", "ㄹ", "ㄺ", "ㄻ", "ㄼ", "ㄽ", "ㄾ", "ㄿ", "ㅀ", "ㅁ", "ㅂ", "ㅃ", "ㅄ", "ㅅ", "ㅆ", "ㅇ", "ㅈ", "ㅉ", "ㅊ", "ㅋ", "ㅌ", "ㅍ", "ㅎ"] MOUM = ["ㅏ", "ㅐ", "ㅑ", "ㅒ", "ㅓ", "ㅔ", "ㅕ", "ㅖ", "ㅗ", "ㅘ", "ㅙ", "ㅚ", "ㅛ", "ㅜ", "ㅝ", "ㅞ", "ㅟ", "ㅠ", "ㅡ", "ㅢ", "ㅣ"] # fmt: on def character_is_korean(c): i = ord(c) return ( (KOR_BEGIN <= i <= KOR_END) or (JAUM_BEGIN <= i <= JAUM_END) or 
(MOUM_BEGIN <= i <= MOUM_END) ) def decompose(c): if not character_is_korean(c): return None i = ord(c) if JAUM_BEGIN <= i <= JAUM_END: return c, " ", " " if MOUM_BEGIN <= i <= MOUM_END: return " ", c, " " i -= KOR_BEGIN cho = i // CHOSUNG_BASE jung = (i - cho * CHOSUNG_BASE) // JUNGSUNG_BASE jong = i - cho * CHOSUNG_BASE - jung * JUNGSUNG_BASE return CHOSUNG[cho], JUNGSUNG[jung], JONGSUNG[jong] def compose(chosung, jungsung, jongsung): unicode = KOR_BEGIN unicode += CHOSUNG_BASE * CHOSUNG.index(chosung) unicode += JUNGSUNG_BASE * JUNGSUNG.index(jungsung) unicode += JONGSUNG.index(jongsung) return chr(unicode) def cleaning___korean___filter_by_ratio( spark, data: Union[RDD, DataFrame], subset: str = "text", filter_type: str = "word", korean_ratio: float = 0.5, *args, **kwargs, ) -> RDD: """ Filters out the text that has less than `korean_ratio` excluding space. Code is from eleutherAI/dps and was modified https://github.com/EleutherAI/dps/blob/master/dps/spark/prep/korean_prep.py#L52 Args: spark (SparkSession): The Spark session object. data(Union[RDD, DataFrame]): The input data to be processed. It can be either an RDD or a DataFrame. subset(str, optional): A subset or column to consider. Defaults to 'text'. filter_type(str, optional): The type of filtering to be applied. Can be 'char' or 'word'. Defaults to 'word'. korean_ratio(float, optional) : The minimum ratio of Korean characters or words required for a text to survive the filtering. Defaults to 0.5. Returns: The filtered data with it's Korean ratio. Raises: ValueError: If the filter_type is not 'char' or 'word', or if the korean_ratio is not between 0 and 1. Examples: With korean_ratio = 0.5 +------------------------------------------------+ | text | +================================================+ | "한국어가 포함 비율이 50% 이상인 경우만 남김" | +------------------------------------------------+ - filter_type = 'char' -> [survive!] 
- Korean characters: 17 - Non-Korean characters: 3 - Total characters: 20 - Korean character ratio: 17 / 20 > 0.5 -> True - filter_type = 'word' -> [survive!] - Korean characters: 6 - Non-Korean characters: 1 - Total characters: 7 - Korean character ratio: 6 / 7 > 0.5 -> True +------------------------------------------------+ | text | +================================================+ | "korean including 비율이 50% 미만인 경우 제거" | +------------------------------------------------+ - filter_type = 'char' -> [remove!] - Korean characters: 10 - Non-Korean characters: 28 - Total characters: 38 - Korean word ratio: 10 / 38 > 0.5 -> False - filter_type = 'word' -> [survive!] - Korean characters: 4 - Non-Korean characters: 3 - Total characters: 7 - Korean word ratio: 4 / 7 > 0.5 -> True Note: - The regex to count Korean characters doesn't work properly on characters that are not words. - e.g 안녕"하세요 is counted is 2 korean words - ["안녕", "하세요"] """ assert filter_type in [ "char", "word", ], f"filter_type should be either `char` or `word` but got {filter_type}" assert ( 0.0 <= korean_ratio <= 1.0 ), f"korean_ratio should be between 0. ~ 1. 
but got {korean_ratio}" if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _korean_ratio_filter(row): if row[subset] is None or len(row[subset]) == 0: return False if filter_type == "char": korean_counts = len(re.findall("[ㄱ-힣]", row[subset])) all_counts = len(re.sub("[ \r\n\t\f\v]", "", row[subset])) if filter_type == "word": korean_counts = len(re.findall(r"\b[\w]*[ㄱ-힣][\w]*\b", row[subset])) all_counts = len(re.findall(r"\b\w+\b", row[subset])) if all_counts == 0: return False return (korean_counts / all_counts) >= korean_ratio data = data.filter(_korean_ratio_filter) return data def classify_korean_type(unicode): if JAUM_BEGIN <= unicode <= JAUM_END: return KoreanType.JAUM elif MOUM_BEGIN <= unicode <= MOUM_END: return KoreanType.MOUM elif KOR_BEGIN <= unicode <= KOR_END: return KoreanType.COMPLETE else: return KoreanType.ELSE def reduce_repeated_emotions(text, num_repeats=2): if num_repeats > 0: repeat_chars_pattern = re.compile(r"(\w)\\1{2,}") text = repeat_chars_pattern.sub("\\1" * num_repeats, text) return text @register_etl def cleaning___korean___reduce_emoticon( spark, data: Union[RDD, DataFrame], subset: Union[str, List[str]] = "text", num_repeats: int = 2, *args, **kwargs, ) -> RDD: """ Reduces emoticon Korean characters. It performs the following steps: 1. Splits complete Korean characters into individual characters, preserving only the previous jaum and next moum. - e.g. (remain) ㅋㅋ킄ㅋㅋㅋ -> ㅋㅋ킄ㅋㅋㅋ - e.g. (splited) ㅋㅋ쿠ㅜㅜㅜ -> ㅋㅋㅋㅜㅜㅜㅜ 2. Reduces repeating Korean characters. - e.g. ㅋㅋㅋㅋㅋ -> ㅋㅋ Args: spark(SparkSession): The Spark session object. data(Union[RDD, DataFrame]): The input data to be processed. It can be either an RDD or a DataFrame. subset(str, optional): A subset or columns to consider. Defaults to 'text'. num_repeats(int, optional): The number of repeating characters to reduce. Defaults to 2. Returns: RDD: The processed data with reduced emoticon Korean characters. 
Note: **[ potential risk of splitting complete korean character ]** splitting emoticon characters into individual characters has high risk inside so only left one case that is `complete korean character between jaum and moum` other cases were added also but due to the risk, wiped out References: - `soynlp normalizer.py <https://github.com/lovit/soynlp/blob/master/soynlp/normalizer/_normalizer.py>`_ - `dps korean_prep.py <https://github.com/EleutherAI/dps/blob/master/dps/spark/prep/korean_prep.py>`_ """ def _reduce_korean_emotion(row): text = row[subset] if not text: return row korean_types = [classify_korean_type(ord(c)) for c in text] last_idx = len(korean_types) - 1 normalized_text = [] for i, (korean_type, c) in enumerate(zip(korean_types, text)): # when complete korean character is between jaum and moum if (0 < i < last_idx) and ( korean_types[i - 1] == KoreanType.JAUM and korean_type == KoreanType.COMPLETE and korean_types[i + 1] == KoreanType.MOUM ): cho, jung, jong = decompose(c) # case 1. when complete kor char is combination of prev jaum and next moum # e.g. ㅋ(쿠)ㅜ -> ㅋ(ㅋㅜ)ㅜ if cho == text[i - 1] and jung == text[i + 1] and jong == " ": normalized_text.append(cho) normalized_text.append(jung) # case 2. otherwise, just leave it # e.g. ㅋ(쿵)ㅜ -> ㅋ(쿵)ㅜ else: normalized_text.append(c) else: normalized_text.append(c) row[subset] = reduce_repeated_emotions("".join(normalized_text), num_repeats) return row if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) data = data.map(_reduce_korean_emotion) return data ================================================ FILE: dataverse/etl/cleaning/length.py ================================================ """ Filtering based on length. Copyright (c) 2024-present Upstage Co., Ltd. 
Apache-2.0 license """ from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl.registry import register_etl @register_etl def cleaning___length___char_len_filter( spark, data: Union[RDD, DataFrame], subset: str = "text", min_len: int = None, max_len: int = None, *args, **kwargs ) -> RDD: """ Filters the data by character length. Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. subset (str, optional): A subset or column to consider. Defaults to 'text'. min_len (int, optional): The minimum length of characters to filter. If None, there is no minimum length. max_len (int, optional): The maximum length of characters to filter. If None, there is no maximum length. Returns: The filtered data as an RDD. Raises: ValueError: If both min_len and max_len are None. Note: - min_len <= len <= max_len - min_len and max_len can not be None at the same time. - If min_len is None, then only the maximum length is considered. - If max_len is None, then only the minimum length is considered. 
""" if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) assert ( min_len is not None or max_len is not None ), "min_len and max_len cannot be None at the same time" if min_len is not None and max_len is not None: data = data.filter(lambda row: min_len <= len(row[subset]) <= max_len) elif min_len is None: data = data.filter(lambda row: len(row[subset]) <= max_len) elif max_len is None: data = data.filter(lambda row: min_len <= len(row[subset])) return data @register_etl def cleaning___length___word_len_filter( spark, data: Union[RDD, DataFrame], subset="text", min_len: int = None, max_len: int = None, *args, **kwargs ): """ filter by word length min_len <= len <= max_len - if min_len is None, then len <= max_len - if max_len is None, then len >= min_len args: subset: column to filter min_len: minimum length to filter max_len: maximum length to filter """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) assert ( min_len is not None or max_len is not None ), "min_len and max_len cannot be None at the same time" if min_len is not None and max_len is not None: data = data.filter(lambda row: min_len <= len(row[subset].split()) <= max_len) elif min_len is None: data = data.filter(lambda row: len(row[subset].split()) <= max_len) elif max_len is None: data = data.filter(lambda row: min_len <= len(row[subset].split())) return data ================================================ FILE: dataverse/etl/cleaning/number.py ================================================ """ Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ import re from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl.registry import register_etl @register_etl def cleaning___number___normalize( spark, data: Union[RDD, DataFrame], subset: str = "text", assign_number: int = 0, *args, **kwargs, ) -> RDD: """ Convert all the number to assigned number (e.g. 
0) Code is from facebookresearch/cc_net https://github.com/facebookresearch/cc_net/blob/main/cc_net/text_normalizer.py Examples: - input +----------+ | text | +==========+ | 1234| | 1234.5678| +----------+ - output +----------+ | text | +==========+ | 0000| | 0000.0000| +----------+ Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. It can be either an RDD or a DataFrame. subset (str, optional): A subset or column to consider. Defaults to 'text'. assign_number (int, optional): The number to assign. Default is 0. Returns: The normalized data. Raises: AssertionError: If assign_number is not between 0 and 9 (inclusive). """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _normalize_number(row): row[subset] = re.sub(r"\d", str(assign_number), row[subset]) return row # assign_number is between 0 ~ 9 assert assign_number in range( 10 ), f"assign_number should be between 0 ~ 9 but got {assign_number}" data = data.map(_normalize_number) return data ================================================ FILE: dataverse/etl/cleaning/table.py ================================================ """ Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from pyspark.sql import functions as F from dataverse.etl.registry import register_etl @register_etl def cleaning___table___merge_col_vertical( spark, data: Union[RDD, DataFrame], col1: str = None, col2: str = None, merge_col_name: str = "merge_col", *args, **kwargs ) -> RDD: """ Merges two columns vertically into one column. 
Example: Before: +------+------+---------+ | col1 | col2 | species | +======+======+=========+ | 1 | 2 | duck | +------+------+---------+ | 3 | 4 | duck | +------+------+---------+ | 5 | 6 | ducky | +------+------+---------+ After calling ``cleaning_table_merge_col_vertical(...)``: +--------+---------+ | number | species | +========+=========+ | 1 | duck | +--------+---------+ | 3 | duck | +--------+---------+ | 5 | ducky | +--------+---------+ | 2 | duck | +--------+---------+ | 4 | duck | +--------+---------+ | 6 | ducky | +--------+---------+ Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. It can be either an RDD or a DataFrame. col1 (str): The name of the first column to merge. col2 (str): The name of the second column to merge. merge_col_name (str, optional): The name of the merged column. Returns: The processed data with the merged column. Raises: ValueError: If col1 or col2 is not specified. """ if isinstance(data, RDD): data = data.toDF() assert col1 is not None, "col1 must be specified" assert col2 is not None, "col2 must be specified" rest_cols = [c for c in data.columns if c not in [col1, col2]] df1 = data.select(*rest_cols, F.col(col1).alias(merge_col_name)) df2 = data.select(*rest_cols, F.col(col2).alias(merge_col_name)) # union the dataframes data = df1.union(df2) return data ================================================ FILE: dataverse/etl/cleaning/unicode.py ================================================ """ Copyright (c) 2024-present Upstage Co., Ltd. 
Apache-2.0 license """ import re import unicodedata from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl.registry import register_etl UNICODE_PUNCT = { ",": ",", "。": ".", "、": ",", "„": '"', "”": '"', "“": '"', "«": '"', "»": '"', "1": '"', "」": '"', "「": '"', "《": '"', "》": '"', "´": "'", "∶": ":", ":": ":", "?": "?", "!": "!", "(": "(", ")": ")", ";": ";", "–": "-", "—": " - ", ".": ". ", "~": "~", "’": "'", "…": "...", "━": "-", "〈": "<", "〉": ">", "【": "[", "】": "]", "%": "%", "►": "-", } @register_etl def cleaning___unicode___remove_punct( spark, data: Union[RDD, DataFrame], subset: str = "text", *args, **kwargs ) -> RDD: """ Removes all the Unicode punctuations. Code is from facebookresearch/cc_net https://github.com/facebookresearch/cc_net/blob/main/cc_net/text_normalizer.py Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. It can be either an RDD or a DataFrame. subset (str, optional): A subset or column to consider. Defaults to 'text'. Returns: The cleaned data. """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _remove_unicode_punct(row): row[subset] = re.sub(f"[{''.join(UNICODE_PUNCT.keys())}]", "", row[subset]) return row data = data.map(_remove_unicode_punct) return data @register_etl def cleaning___unicode___replace_punct( spark, data: Union[RDD, DataFrame], subset: str = "text", *args, **kwargs ) -> RDD: """ Replace all the unicode punctuations Code is from facebookresearch/cc_net https://github.com/facebookresearch/cc_net/blob/main/cc_net/text_normalizer.py Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. It can be either an RDD or a DataFrame. subset (str, optional): A subset or column to consider. Defaults to 'text'. Returns: The cleaned data. 
""" if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _replace_unicode_punct(row): row[subset] = "".join((UNICODE_PUNCT.get(c, c) for c in row[subset])) return row data = data.map(_replace_unicode_punct) return data @register_etl def cleaning___unicode___normalize( spark, data: Union[RDD, DataFrame], subset="text", *args, **kwargs ): """ Normalize the unicode Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be processed. It can be either an RDD or a DataFrame. subset (str, optional): A subset or column to consider. Defaults to 'text'. Returns: The cleaned data. """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _normalize(row): row[subset] = unicodedata.normalize("NFC", row[subset]) return row data = data.map(_normalize) return data ================================================ FILE: dataverse/etl/data_ingestion/README.md ================================================ # Data Ingestion > Ingest various data sources into the desired format **Recommendation for Data Ingestion** > Use Data Ingestion to convert all datasets to unified format you choose before preprocessing(transform) - for `Text Only` Dataset, recommend using `ufl` format - for details on `ufl` format, see below - for `other` dataset, consider creating a new unified format ## 📚 Data Ingestion Flow > This is the recommended flow for data ingestion, but not mandatory There is 2 types of data ingestion flow for standard - **1 step flow** (load & template) - load `raw data` to `desired format` directly - **2 step flow** (load -> template) - load `raw data` to `raw format` first with **dict type** - convert `raw format` to `desired format` If you want to create 3 steps, thats on you. Remember this is just a guideline. ### 📗 Why 2 step flow? 
> To support various templates for the same data source Let's suppose we are ingesting `mmlu` dataset and our desired format is `ufl` format. And with the following 2 templates, we can create 2 different data with `ufl` format. To give user a broader choice, multiple templates for the same data source is necessary and 2 step flow is the way to go. ```python # raw format raw = { "question": "Let p = (1, 2, 5, 4)(2, 3) in S_5 . Find the index of <p> in S_5.", "choices": ["8", "2", "24", "120"], "answer": 1, } # template v1 - only question (q) ufl = { 'id': "b1c2d3e4f5g6h7i8j9k0", 'name': "mmlu", 'text': "Let p = (1, 2, 5, 4)(2, 3) in S_5 . Find the index of <p> in S_5.", 'meta': {}, } # template v2 - question, answer (qa) ufl = { 'id': "a1b2c3d4e5f6g7h8i9j0", 'name': "mmlu", 'text': "question: Let p = (1, 2, 5, 4)(2, 3) in S_5 . Find the index of <p> in S_5.\nanswer: 8", 'meta': {}, } ``` ## 📚 Naming Convention > This is a strong recommendation. You can use your own naming convention if you want. ```python def data_ingestion___[ETL Sub-Category]___[raw source]2[target format]() ``` - `ETL Sub-Category` - 2 types of sub-category (python file) 1. Name to the data source name to handle (`specific` purpose) - e.g. mmlu - e.g. squad 2. Name `file format` itself (`general` purpose) - e.g. parquet - e.g. csv - e.g. hugingface - `ETL process name` - Name the ETL process as the `raw source` -> `target format` - **raw source** - `file format` - `parquet` - (loading data from parquet) - `hf` - (loading data from huggingface dataset) - `csv` - (loading data from csv) - etc - `raw` - the data is already loaded in memory as raw - **target format** - `ufl` - (loading data to ufl format) - e.g. `parquet2ufl` means loading parquet to ufl format - e.g. `hf2ufl` means loading huggingface dataset to ufl format - `raw` - (loading data w/o any transformation) - e.g. `parquet2raw` means loading parquet to raw format - e.g. 
`hf2raw` means loading huggingface dataset to raw format - `[YOUR_FORMAT]` - this is on you **caveat** - `ufl` is not a file format rather a schema(data format). ### 📗 1 step flow > direct loading raw data to desired format - In case of your data is already saved in UFL format, use `raw` loading ETL process - e.g. `hf2raw` could be used as total 1 step when your data is already saved in UFL format ```python - "data_ingestion/" # converting raw data to desired format - mmlu.py - def data_ingestion___mmlu___parquet2ufl() - def data_ingestion___mmlu___hf2ufl() - squad.py - def data_ingestion___squad___hf2ufl() - mnist.py - def data_ingestion___mnist___csv2ufl() # this is used when loading UFL format saved in parquet - parquet.py - def data_ingestion___parquet___pq2ufl() ``` ### 📗 2 step flow > loading raw data to raw format first and then convert to desired format #### 📖 Step 1 - load raw data to raw format ```python - "data_ingestion/" # converting raw data to raw format - huggingface.py - def data_ingestion___huggingface___hf2raw() - mmlu.py - def data_ingestion___mmlu___parquet2raw() - def data_ingestion___mmlu___hf2raw() - mnist.py - def data_ingestion___mnist___csv2raw() ``` #### 📖 Step 2 - convert raw format to desired format - Name the ETL process as the `raw format` -> `target format` - e.g. `raw2ufl` means converting raw format to ufl format - Add template name to the end of the function name - e.g. `raw2ufl_q` means converting raw format to ufl format with `question` template - e.g. `raw2ufl_qa` means converting raw format to ufl format with `question & answer` template ```python - "data_ingestion/" # converting raw format to desired format - mmlu.py - def data_ingestion___mmlu___raw2ufl_q() - def data_ingestion___mmlu___raw2ufl_qa() - squad.py - def data_ingestion___squad___raw2ufl_v1() - mnist.py - def data_ingestion___mnist___raw2ufl_v1() ``` ## 📚 UFL (Upstage Format for LLM) > This is the schema(data format) recommended by the Upstage LLM. 
Dataverse standard format for preparing pretraining dataset. ```python { "id":"uuid", "name": "string", "text":"string", "meta": "string", } ``` - `id` - uuid v1 - `name` - name of the dataset - `text` - text of the dataset - `meta` - meta data of the dataset - meta data is a stringified json object ### 📗 Why stringified for meta data? > Meta data does not have a fixed schema. It can be anything. So, it is stringified to avoid any issues with the schema. **huggingface datasets** - when 2 datasets have different meta data schema, it will throw an error when merging the datasets ================================================ FILE: dataverse/etl/data_ingestion/__init__.py ================================================ ================================================ FILE: dataverse/etl/data_ingestion/arrow.py ================================================ """ Load Arrow. Support direct loading of arrow saved huggingface dataset to spark dataframe. Copyright (c) 2024-present Upstage Co., Ltd. 
Apache-2.0 license """ import glob import os from typing import List, Union import numpy as np import pyarrow as pa from omegaconf import ListConfig from pyspark.rdd import RDD from dataverse.etl import register_etl def find_arrow_paths(directory): """find *.arrow files recursively""" if isinstance(directory, str): return glob.glob(os.path.join(directory, "**/*.arrow"), recursive=True) elif isinstance(directory, list) or isinstance(directory, ListConfig): arrow_paths = [] for d in directory: arrow_paths.extend(find_arrow_paths(d)) return arrow_paths raise ValueError(f"directory must be str or list, got {type(directory)}") def get_dir_size(arrow_paths): total_size = 0 for fp in arrow_paths: # skip if it is not `.arrow` file if not fp.endswith(".arrow"): continue # skip if it is symbolic link if not os.path.islink(fp): total_size += os.path.getsize(fp) return total_size def arrow_table_to_dict(arrow_path): """ speed 10000 take - 70ms faster than - pyarrow -> pydict direct loading - pyarrow -> pandas -> pydict loading TODO: speed and memory improvement """ in_memory_stream = pa.input_stream(arrow_path) opened_stream = pa.ipc.open_stream(in_memory_stream) table = opened_stream.read_all() # get schema for field names schema = table.schema rows = [] # iterate over each row for row in range(table.num_rows): row_data = { schema.field(col).name: table.column(col)[row].as_py() for col in range(table.num_columns) } rows.append(row_data) return rows @register_etl def data_ingestion___arrow___hf2raw( spark, path: Union[str, List[str]], sample_n: int = -1, arrow_partition_mb_size: int = -1, raw_partition_mb_size: int = 256, repartition: int = -1, seed: int = 42, verbose: bool = True, *args, **kwargs, ) -> RDD: """ Directly loads the arrow saved HuggingFace dataset to raw format as a dictionary. Args: spark (SparkSession): The Spark session object. path (Union[str, List[str]]): The path of the arrow folders. sample_n (int, optional): The number of arrow files to be sampled. 
Defaults to -1. If sample_n is -1, all arrow files will be loaded. arrow_partition_mb_size (int, optional): The size of each arrow partition in MB. Defaults to -1. If arrow_partition_size is -1, it will repartition arrow files by the number of arrow files. This assumes that arrow file size is evenly distributed. When there is data skew in arrow file size, it is recommended to use the default (-1). raw_partition_mb_size (int, optional): The size of each raw partition in MB. Defaults to 256. This is activated only when repartition is -1. repartition (int, optional): Manually choose the number of partitions. Defaults to -1. seed (int, optional): The seed for sampling. Defaults to 42. verbose (bool, optional): Whether to print the information of the dataset. Defaults to True. Returns: RDD: The RDD containing the raw data in dictionary format. Examples: >>> import datasets >>> dataset = datasets.load_dataset('ducky') >>> dataset.save_to_disk('your/path/to/ducky') >>> data_ingestion___arrow___hf2raw()(spark, 'your/path/to/ducky') Caveats: Arrow paths are repartitioned by the number of arrow files. 
""" arrow_paths = find_arrow_paths(path) assert len(arrow_paths) > 0, f"no arrow files found in {path}" # sample from the arrow files if sample_n > 0 and sample_n < len(arrow_paths): np.random.seed(seed) arrow_paths = np.random.choice(arrow_paths, size=sample_n, replace=False) if arrow_partition_mb_size == -1: # if data is skewed, recommend to use default (-1) arrow_repartition = len(arrow_paths) else: # this assume that arrow file size is evenly distributed assert ( arrow_partition_mb_size > 0 ), f"arrow_partition_mb_size must be positive, got {arrow_partition_mb_size}" arrow_total_mb_size = get_dir_size(arrow_paths) / 1024 / 1024 arrow_repartition = arrow_total_mb_size // arrow_partition_mb_size arrow_repartition += 1 if arrow_total_mb_size % arrow_partition_mb_size else 0 arrow_repartition = min(int(arrow_repartition), len(arrow_paths)) rdd = spark.sparkContext.parallelize(arrow_paths) rdd = rdd.repartition(arrow_repartition) rdd = rdd.flatMap(arrow_table_to_dict) if repartition != -1: raw_repartition = repartition else: assert ( raw_partition_mb_size > 0 ), f"raw_partition_mb_size must be positive, got {raw_partition_mb_size}" arrow_total_mb_size = get_dir_size(arrow_paths) / 1024 / 1024 raw_repartition = arrow_total_mb_size // raw_partition_mb_size raw_repartition += 1 if arrow_total_mb_size % raw_partition_mb_size else 0 # count the number of data points (this is expensive) # this is to prevent the case where the number of data points is less than raw_repartition total_data_n = rdd.count() raw_repartition = min(int(raw_repartition), total_data_n) rdd = rdd.repartition(raw_repartition) return rdd ================================================ FILE: dataverse/etl/data_ingestion/common_crawl.py ================================================ """ Load Common Crawl data from dump-id & segment files Code is from facebookresearch/cc_net with some modifications https://github.com/facebookresearch/cc_net This is a migration of the code to Dataverse. 
Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ import functools import glob import gzip import io import json import os import sys import tempfile import time import typing as tp import warnings from pathlib import Path from typing import Iterable, List, Optional, TextIO, Union from urllib.parse import urlparse import numpy as np import requests from pyspark.rdd import RDD from dataverse.etl import register_etl from dataverse.utils.format import get_uuidv1 from dataverse.utils.setting import SystemSetting def parse_doc(headers: List[str], doc: List[str]) -> Optional[dict]: """Headers format is: WARC/1.0 WARC-Type: conversion WARC-Target-URI: [url] WARC-Date: [crawldate: 2019-02-15T19:15:59Z] WARC-Record-ID: <urn:uuid:8865156e-d5f1-4734-9c68-4b46eaf2bb7e> WARC-Refers-To: <urn:uuid:340152e2-65cf-4143-b522-8ce4e2d069d7> WARC-Block-Digest: sha1:S3DTWCONT2L6ORTGCY2KXEZ37LNBB7V2 Content-Type: text/plain Content-Length: 7743 """ if not headers or not doc: return None try: url, date, digest, length = None, None, None, None for header in headers: if header.startswith("WARC-Target-URI:"): url = header.split()[1] elif header.startswith("WARC-Date:"): date = header.split()[1] elif header.startswith("WARC-Block-Digest:"): digest = header.split()[1] elif header.startswith("Content-Length:"): length = int(header.split()[1]) except Exception: # logger.warning("Can't parse header:", e, headers, doc) return None # Docs are separated by two empty lines. 
last = None if not doc[-1] and not doc[-2]: last = -2 title, doc = doc[0], doc[1:last] return { "url": url, "date_download": date, "digest": digest, "length": length, "nlines": len(doc), "source_domain": urlparse(url).netloc, "title": title, "raw_content": "\n".join(doc), } def group_by_docs(warc_lines: Iterable[str]) -> Iterable[dict]: doc: List[str] = [] headers, read_headers = [], True for warc in warc_lines: warc = warc.strip() if read_headers: headers.append(warc) read_headers = warc != "" continue if warc == "WARC/1.0": # We reached the beginning of the new doc. parsed = parse_doc(headers, doc) if parsed is not None: yield parsed headers, doc, read_headers = [warc], [], True continue doc.append(warc) # Return the last document if doc: parsed = parse_doc(headers, doc) if parsed is not None: yield parsed def _close_when_exhausted(file) -> Iterable[str]: with file: yield from file def open_segment_file(segment: str, verbose: bool = True) -> Iterable[str]: """ overwrite the open_segment function to get the WET file from the folder args: segment: path to the WET file """ filename = Path(segment) if filename.suffix == ".gz": file: TextIO = gzip.open(filename, "rt") # type: ignore else: file = open(filename, "rt") return _close_when_exhausted(file) def process_segment_file(segment: str, verbose: bool = True) -> Iterable[dict]: for doc in group_by_docs(open_segment_file(segment, verbose=verbose)): doc["cc_segment"] = segment yield doc def find_wet_files(directory): """find *.wet, *wet.gz files recursively""" return glob.glob(os.path.join(directory, "**/*.wet"), recursive=True) + glob.glob( os.path.join(directory, "**/*.wet.gz"), recursive=True ) WET_URL_ROOT = "https://data.commoncrawl.org" FileDescriptor = Union[Path, List[Path], str] ReadableFileLike = Union[Iterable[str], FileDescriptor, None] def _tmp(prefix: str = None, suffix: str = None, dir: Path = None) -> Path: if isinstance(prefix, Path): prefix = str(prefix) if isinstance(suffix, Path): suffix = 
str(suffix) _, tmp_path = tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir) return Path(tmp_path) def _yield_from(files: list) -> Iterable[str]: for file in files: yield from open_read(file) def open_read(filename: ReadableFileLike) -> Iterable[str]: """Open the given file, list of files or files matching the given glob and read lines. `filename` is None or "-" -> reads from stdin `filename` is a Path / str -> interprets filename as a glob and open files matching it `filename` is a list -> opens sequentially all files from the list using `open_read` `filename` is something else -> returns the object wrapped in a `nullcontext` This allows to pass already openened files or iterables. `open_read` will decompress gzip files, given they have ".gz" suffix. """ if filename is None: return sys.stdin if isinstance(filename, list): assert isinstance(filename[0], Path) if len(filename) == 0: return [] if len(filename) > 1: return _yield_from(filename) filename = tp.cast(Path, filename[0]) if isinstance(filename, str): if filename.startswith("http://") or filename.startswith("https://"): return open_remote_file(filename) filename = Path(filename) if not isinstance(filename, Path): # we might have received an iterable, return it unmodified. return filename # type: ignore # Expand glob patterns only when reading files = [Path(f) for f in sorted(glob.glob(str(filename)))] if len(files) > 1: return _yield_from(files) if len(files) == 1: filename = files[0] assert isinstance(filename, Path) if filename.suffix == ".gz": file: TextIO = gzip.open(filename, "rt") # type: ignore else: file = open(filename, "rt") return _close_when_exhausted(file) def request_get_content(url: str, n_retry: int = 3, verbose: bool = True) -> bytes: """Retrieve the binary content at url. Retry on connection errors. 
""" t0 = time.time() if verbose: # TODO: Logging will be activated later # logging.info(f"Starting download of {url}") print(f"Starting download of {url}") for i in range(1, n_retry + 1): try: with requests.Session() as session: r = session.get(url) r.raise_for_status() break except requests.exceptions.RequestException as e: # Sleep and try again on error, unless it's a 404. message = e.args[0] if isinstance(e.args[0], str) else "" if i == n_retry or "Client Error" in message: raise e warnings.warn(f"Swallowed error {e} while downloading {url} ({i} out of {n_retry})") time.sleep(10 * 2**i) if verbose: dl_time = time.time() - t0 dl_speed = len(r.content) / dl_time / 1024 # logging.info( # f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)" # ) print(f"Downloaded {url} [{r.status_code}] took {dl_time:.0f}s ({dl_speed:.1f}kB/s)") return r.content def open_remote_file(url: str, cache: Path, verbose: bool = True) -> Iterable[str]: """ Download the files at the given url to memory and opens it as a file. Assumes that the file is small, and fetch it when this function is called. """ if cache and cache.exists(): return open_read(cache) # TODO: open the remote file in streaming mode. # The hard part is that we need to write the content on disk at the same time, # to implement disk caching. raw_bytes = request_get_content(url, verbose=verbose) content = io.BytesIO(raw_bytes) if url.endswith(".gz"): f: TextIO = gzip.open(content, mode="rt") # type: ignore else: f = io.TextIOWrapper(content) try: # The file might have been created even not fully downloaded/written # so make sure tmp_cache is deleted when the program exits. # and only replace the cache file when the download is complete. 
if cache and not cache.exists(): tmp_cache = _tmp(cache) tmp_cache.write_bytes(raw_bytes) if not cache.exists(): tmp_cache.replace(cache) finally: if tmp_cache.exists(): tmp_cache.unlink() return _close_when_exhausted(f) def cc_wet_paths_url(dump_id: str) -> str: return "/".join([WET_URL_ROOT, "crawl-data", "CC-MAIN-" + dump_id, "wet.paths.gz"]) def segment_url(segment: str): return "/".join((WET_URL_ROOT, segment)) def cc_segment_urls(dump_id: str, cache_dir: Path, verbose: bool = True) -> List[str]: wet_paths = cc_wet_paths_url(dump_id) wet_paths_cache = cache_dir / f"wet_{dump_id}.paths.gz" f = open_remote_file(wet_paths, cache=wet_paths_cache, verbose=verbose) return [segment.strip() for segment in f] def open_segment_url(segment: str, cache_dir: Path, verbose: bool = True) -> Iterable[str]: url = segment_url(segment) file: Optional[Path] = None if cache_dir: file = cache_dir / segment.split("/")[-1] return open_remote_file(url, cache=file, verbose=verbose) def process_segment_url(segment: str, cache_dir: Path, verbose: bool = True) -> Iterable[str]: for doc in group_by_docs(open_segment_url(segment, cache_dir, verbose=verbose)): doc["cc_segment"] = segment yield doc @register_etl def data_ingestion___common_crawl___wet2raw( spark, wet_path: str, segment_n: int = -1, repartition=20, seed: int = 42, verbose=True, *args, **kwargs, ) -> RDD: """ Load WET files and convert them to raw format as a dictionary. [ what is WET? ] - WET files which store extracted plain text from the data stored in the WARC. Args: spark: The Spark session. wet_path: The path to the WET folder that includes WET format files. This search recursively, so you don't need to specify the path to each WET file. This search for all the *.wet, *.gz files in the folder. segment_n: The number of segments to load. This is a sampling parameter. One segment is about 1GB. Set as -1 (default) to load all the segments. repartition: The number of partitions. seed: The random seed. 
verbose: Whether to print the information of the dataset. Returns: rdd: The RDD containing the converted raw data. """ wet_paths = find_wet_files(wet_path) if segment_n > 0 and segment_n < len(wet_paths): np.random.seed(seed) wet_paths = np.random.choice(wet_paths, size=segment_n, replace=False) rdd = spark.sparkContext.parallelize(wet_paths) rdd = rdd.flatMap(functools.partial(process_segment_file, verbose=verbose)) rdd = rdd.repartition(repartition) return rdd @register_etl def data_ingestion___common_crawl___dump2raw( spark, dump: str, segment_n: int = -1, repartition: int = 20, use_cache: bool = True, cache_dir: str = None, seed: int = 42, verbose: bool = True, *args, **kwargs, ) -> RDD: """ Ingests data from Common Crawl dump and converts it to raw format. Args: spark (SparkSession): The Spark session. dump (str): The dump ID of the Common Crawl. For example, '2023-23'. segment_n (int, optional): The number of segments to load. Default is -1, which loads all segments. Note that one segment is about 1GB. repartition (int, optional): The number of partitions. Default is 20. use_cache (bool, optional): Whether to use the cache. Default is True. If you want to save disk space, set as False because the size of cache can be large. FYI, on WET dump is about 10TB. cache_dir (str, optional): The cache path to save the dataset. seed (int, optional): The random seed. Default is 42. verbose (bool, optional): Whether to print the information of the dataset. Default is True. Returns: RDD: The RDD containing the processed data. 
""" if use_cache: if cache_dir is None: # save the parquet at package root path cache_dir = SystemSetting().CACHE_DIR cache_dir = f"{cache_dir}/.cache/dataverse/dataset/common_crawl_{dump}" else: cache_dir = f"{cache_dir}/common_crawl_{dump}" else: cache_dir = None if not isinstance(cache_dir, Path): cache_dir = Path(cache_dir) # if cache dir exist creat one if cache_dir and not cache_dir.exists(): cache_dir.mkdir(parents=True) wet_urls = cc_segment_urls(dump, cache_dir, verbose=verbose) if segment_n > 0 and segment_n < len(wet_urls): np.random.seed(seed) wet_urls = np.random.choice(wet_urls, size=segment_n, replace=False) rdd = spark.sparkContext.parallelize(wet_urls) rdd = rdd.flatMap( functools.partial( process_segment_url, cache_dir=cache_dir, verbose=verbose, ) ) rdd = rdd.repartition(repartition) return rdd def convert_bytes(data): if isinstance(data, bytes): return data.decode() if isinstance(data, dict): return {convert_bytes(key): convert_bytes(value) for key, value in data.items()} if isinstance(data, list): return [convert_bytes(element) for element in data] return data @register_etl def data_ingestion___common_crawl___raw2ufl(spark, data: RDD, *args, **kwargs): """ Converts raw format to UFL with custom template. Args: spark (SparkSession): The Spark session. data (RDD): The input data. Returns: The converted data in UFL format. 
""" def templatev1(data): new_data = {} new_data["id"] = get_uuidv1() new_data["name"] = "common_crawl" new_data["text"] = f"{data.get('raw_content', None)}" new_data["meta"] = json.dumps( convert_bytes( { "title": data.get("title", None), "url": data.get("url", None), "date_download": data.get("date_download", None), "digest": data.get("digest", None), "length": data.get("length", None), "nlines": data.get("nlines", None), "source_domain": data.get("source_domain", None), "cc_segment": data.get("cc_segment", None), } ) ) return new_data data = data.map(lambda x: templatev1(x)) return data ================================================ FILE: dataverse/etl/data_ingestion/csv.py ================================================ """ Load CSV data Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ from typing import List, Union from pyspark.rdd import RDD from dataverse.etl import register_etl # from dataverse.utils.format import huggingface2parquet, load_huggingface_dataset @register_etl def data_ingestion___csv___csv2raw( spark, path: Union[str, List[str]], repartition: int = 20, verbose: bool = True, *args, **kwargs ) -> RDD: """ Converts CSV data to raw RDD. Args: spark (SparkSession): The Spark session. path (Union[str, List[str]]): The path(s) to the CSV file(s). repartition (int, optional): The number of partitions for the RDD. Defaults to 20. verbose (bool, optional): Whether to print the information of the dataset. Returns: RDD: The raw RDD containing the CSV data. """ if isinstance(path, str): path = [path] df = spark.read.csv(*path, header=True) rdd = df.rdd.repartition(repartition) rdd = rdd.map(lambda row: row.asDict()) return rdd ================================================ FILE: dataverse/etl/data_ingestion/cultura_x.py ================================================ """ Copyright (c) 2024-present Upstage Co., Ltd. 
Apache-2.0 license
"""
import json

from pyspark.rdd import RDD

from dataverse.etl import register_etl
from dataverse.utils.format import get_uuidv1


@register_etl
def data_ingestion___cultura_x___raw2ufl(spark, ufl: RDD, *args, **kwargs):
    """
    Converts raw format to UFL with custom template.

    Args:
        spark (SparkSession): The Spark session object.
        ufl (RDD): The input RDD in raw format. Rows must carry the
            `text`, `url`, `timestamp` and `source` keys.

    Returns:
        RDD: The transformed RDD in UFL format.
    """

    def templatev1(row):
        # build a fresh UFL record; extra source columns are dropped
        new_row = {}
        new_row["id"] = get_uuidv1()
        new_row["name"] = "cultura_x"
        new_row["text"] = row["text"]
        # meta is stringified json (see data_ingestion README: schemas vary)
        new_row["meta"] = json.dumps(
            {
                "url": row["url"],
                "timestamp": row["timestamp"],
                "source": row["source"],
            }
        )
        return new_row

    ufl = ufl.map(lambda x: templatev1(x))
    return ufl


================================================
FILE: dataverse/etl/data_ingestion/huggingface.py
================================================
"""
Load Huggingface data

This is used just to load huggingface dataset without any reformatting

Copyright (c) 2024-present Upstage Co., Ltd.
Apache-2.0 license
"""
from typing import List, Union

from pyspark.rdd import RDD

from dataverse.etl import register_etl
from dataverse.utils.format import huggingface2parquet, load_huggingface_dataset


@register_etl
def data_ingestion___huggingface___hf2raw(
    spark,
    name_or_path: Union[str, List[str]],
    split: str = None,
    from_disk: bool = False,
    repartition: int = 20,
    verbose: bool = True,
    *args,
    **kwargs
) -> RDD:
    """
    Convert a HuggingFace dataset to raw format as a dictionary.

    Args:
        spark (SparkSession): The Spark session.
        name_or_path (Union[str, List[str]]): The name or path of the HuggingFace dataset.
        split (str, optional): The split of the dataset. Defaults to None.
        from_disk (bool, optional): Whether to load from disk. Defaults to False.
            No split is allowed when from_disk is True.
        repartition (int, optional): The number of partitions. Defaults to 20.
        verbose (bool, optional): Whether to print the information of the dataset.
            Defaults to True.

    Returns:
        rdd: The converted dataset as an RDD of dictionaries.
    """
    dataset = load_huggingface_dataset(name_or_path, split=split, from_disk=from_disk)
    # the dataset is materialized as parquet first, then read back by Spark
    parquet_path = huggingface2parquet(dataset, verbose=verbose)

    df = spark.read.parquet(parquet_path)
    rdd = df.rdd.repartition(repartition)
    rdd = rdd.map(lambda row: row.asDict())
    return rdd


================================================
FILE: dataverse/etl/data_ingestion/parquet.py
================================================
"""
Copyright (c) 2024-present Upstage Co., Ltd.
Apache-2.0 license
"""
from typing import List, Union

from pyspark.rdd import RDD

from dataverse.etl import register_etl


@register_etl
def data_ingestion___parquet___pq2raw(
    spark, path: Union[str, List[str]], repartition=20, *args, **kwargs
) -> RDD:
    """
    Reads parquet files into an RDD and repartitions it.

    Args:
        spark (SparkSession): The Spark session.
        path (str or list): The path of the parquet files.
        repartition (int): The number of partitions.

    Returns:
        rdd: The repartitioned RDD containing the data from the parquet files.
    """
    if isinstance(path, str):
        path = [path]

    df = spark.read.parquet(*path)
    rdd = df.rdd.repartition(repartition)
    rdd = rdd.map(lambda row: row.asDict())
    return rdd


================================================
FILE: dataverse/etl/data_ingestion/red_pajama.py
================================================
"""
Supported datasets:
https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T
https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample

Copyright (c) 2024-present Upstage Co., Ltd.
Apache-2.0 license """ from typing import List, Union from dataverse.etl import register_etl from dataverse.utils.format import ( get_uuidv1, huggingface2parquet, load_huggingface_dataset, ) """ 1 stage data ingestion - default ==================================== direct loading ufl with one ETL process """ def convert2ufl(row): row["id"] = get_uuidv1() row["name"] = "red_pajama" return row @register_etl def data_ingestion___red_pajama___parquet2ufl(spark, input_paths, repartition=20, *args, **kwargs): """ convert parquet file to ufl """ df = spark.read.parquet(*input_paths) rdd = df.rdd.repartition(repartition) rdd = rdd.map(lambda row: row.asDict()) rdd = rdd.map(lambda x: convert2ufl(x)) return rdd @register_etl def data_ingestion___red_pajama___hf2ufl( spark, name_or_path: Union[str, List[str]] = "togethercomputer/RedPajama-Data-1T-Sample", split=None, from_disk=False, repartition=20, verbose=True, *args, **kwargs ): """ convert huggingface dataset to ufl Args: spark (SparkSession): spark session name_or_path (str or list): the name or path of the huggingface dataset split (str): the split of the dataset from_disk (bool): whether to load from disk - no split is allowed when from_disk is True repartition (int): the number of partitions verbose (bool): whether to print the information of the dataset """ dataset = load_huggingface_dataset(name_or_path, split=split, from_disk=from_disk) parquet_path = huggingface2parquet(dataset, verbose=verbose) df = spark.read.parquet(parquet_path) rdd = df.rdd.repartition(repartition) rdd = rdd.map(lambda row: row.asDict()) rdd = rdd.map(lambda x: convert2ufl(x)) return rdd """ 2 stage data ingestion - default ==================================== loading ufl with custom template with two ETL process """ @register_etl def data_ingestion___red_pajama___hf2raw( spark, name_or_path: Union[str, List[str]] = "togethercomputer/RedPajama-Data-1T-Sample", split=None, repartition=20, verbose=True, *args, **kwargs ): """ convert 
huggingface dataset to raw format as dict Args: spark (SparkSession): spark session name_or_path (str or list): the name or path of the huggingface dataset split (str): the split of the dataset repartition (int): the number of partitions verbose (bool): whether to print the information of the dataset """ dataset = load_huggingface_dataset(name_or_path, split=split) parquet_path = huggingface2parquet(dataset, verbose=verbose) df = spark.read.parquet(parquet_path) rdd = df.rdd.repartition(repartition) rdd = rdd.map(lambda row: row.asDict()) return rdd @register_etl def data_ingestion___red_pajama___raw2ufl_templatev1(spark, ufl, *args, **kwargs): """ convert raw format to ufl with custom template """ def templatev1(row): row["id"] = get_uuidv1() row["name"] = "red_pajama" return row ufl = ufl.map(lambda x: templatev1(x)) return ufl @register_etl def data_ingestion___red_pajama___raw2ufl_templatev2(spark, ufl, *args, **kwargs): ... return ufl ================================================ FILE: dataverse/etl/data_ingestion/slim_pajama.py ================================================ """ Supported datasets: https://huggingface.co/datasets/cerebras/SlimPajama-627B Copyright (c) 2024-present Upstage Co., Ltd. 
Apache-2.0 license """ from typing import List, Union from dataverse.etl import register_etl from dataverse.utils.format import huggingface2parquet, load_huggingface_dataset @register_etl def data_ingestion___slim_pajama___parquet2ufl(spark, input_paths, repartition=20, *args, **kwargs): """ convert parquet file to ufl """ df = spark.read.parquet(*input_paths) rdd = df.rdd.repartition(repartition) rdd = rdd.map(lambda row: row.asDict()) return rdd @register_etl def data_ingestion___slim_pajama___hf2ufl( spark, name_or_path: Union[str, List[str]] = "cerebras/SlimPajama-627B", split=None, from_disk=False, repartition=20, verbose=True, *args, **kwargs ): """ convert huggingface dataset to ufl Args: spark (SparkSession): spark session name_or_path (str or list): the name or path of the huggingface dataset split (str): the split of the dataset from_disk (bool): whether to load from disk - no split is allowed when from_disk is True repartition (int): the number of partitions verbose (bool): whether to print the information of the dataset """ dataset = load_huggingface_dataset(name_or_path, split=split, from_disk=from_disk) parquet_path = huggingface2parquet(dataset, verbose=verbose) df = spark.read.parquet(parquet_path) rdd = df.rdd.repartition(repartition) rdd = rdd.map(lambda row: row.asDict()) return rdd ================================================ FILE: dataverse/etl/data_ingestion/test.py ================================================ """ special purpose to create fake data for testing or debugging Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ import json from faker import Faker from pyspark.rdd import RDD from dataverse.etl import register_etl @register_etl def data_ingestion___test___generate_fake_ufl( spark, n: int = 100, repartition: int = 20, verbose: bool = True, *args, **kwargs ) -> RDD: """ Generate fake data for testing or debugging. Args: spark (SparkSession): The Spark session object. 
n (int, optional): The number of data to generate. Default is 100. repartition (int, optional): The number of partitions. Default is 20. verbose (bool, optional): Whether to print the information of the dataset. Default is True. Returns: RDD: The generated fake data RDD. """ faker = Faker() def _generate_fake_ufl(n=100): while n > 0: n -= 1 yield { "id": faker.uuid4(), "name": "test_fake_ufl", "text": faker.text(), "meta": json.dumps( { "name": faker.name(), "age": faker.random_int(0, 100), "address": faker.address(), "job": faker.job(), } ), } rdd = spark.sparkContext.parallelize(_generate_fake_ufl(n=n)) rdd = rdd.repartition(repartition) return rdd ================================================ FILE: dataverse/etl/data_save/README.md ================================================ # Data Save > How to save data to the destination? In other words, how to save the data to the destination? ## 🌌 Naming Convention - TBD ## 🌌 Supported Data Save Method - AWS (S3) - HuggingFace (Dataset) - Parquet ================================================ FILE: dataverse/etl/data_save/__init__.py ================================================ ================================================ FILE: dataverse/etl/data_save/aws.py ================================================ """ TODO: Data saving to AWS S3 This is not implemented yet. Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ # TODO ================================================ FILE: dataverse/etl/data_save/huggingface.py ================================================ """ Data saving to Huggingface Datasets Huggingface support spark natively! https://huggingface.co/docs/datasets/use_with_spark Copyright (c) 2024-present Upstage Co., Ltd. 
@register_etl
def data_save___huggingface___ufl2hf_hub(spark, ufl, hub_path, repartition=1, *args, **kwargs):
    """
    TODO: Save data to a HuggingFace dataset and upload it to the hub.

    Args:
        spark (sparkSession): The Spark session.
        ufl (Union[RDD, DataFrame]): The input data to be saved.
        hub_path (str): The hub repository path to upload to.
        repartition (int, optional): The number of partitions. Defaults to 1.

    Raises:
        NotImplementedError: Always — this ETL is not implemented yet.
    """
    # BUG FIX: the exception was previously only instantiated
    # (`NotImplementedError()` followed by `return None`), so callers
    # silently received None as if the save had succeeded. Raise it.
    raise NotImplementedError("data_save___huggingface___ufl2hf_hub is not implemented yet")
""" if isinstance(ufl, RDD): ufl = ufl.toDF() assert isinstance(ufl, DataFrame), f"ufl must be RDD or DataFrame, got {type(ufl)}" ufl = ufl.repartition(repartition) hf_dataset = Dataset.from_spark(ufl) return hf_dataset ================================================ FILE: dataverse/etl/data_save/parquet.py ================================================ """ Data saving to Parquets Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ import os from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl import register_etl @register_etl def data_save___parquet___ufl2parquet( spark, ufl: Union[RDD, DataFrame], save_path: str, repartition: int = 1, *args, **kwargs, ) -> str: """ Save data to parquet and return the path. Args: spark(sparkSession): The Spark session. ufl(Union[RDD, DataFrame]):The input data to be saved. save_path(str): The path to save the HF dataset. repartition(int, optional): The number of partitions to repartition the data. Defaults to 1. Raises: ValueError: If the save_path already exists. Returns: str: The path where the parquet file is saved. """ if os.path.exists(save_path): raise ValueError(f"save_path {save_path} already exists") if isinstance(ufl, RDD): ufl = ufl.toDF() assert isinstance(ufl, DataFrame), f"ufl must be RDD or DataFrame, got {type(ufl)}" ufl = ufl.repartition(repartition) ufl.write.parquet(save_path, mode="overwrite") return save_path ================================================ FILE: dataverse/etl/decontamination/README.md ================================================ ================================================ FILE: dataverse/etl/decontamination/__init__.py ================================================ ================================================ FILE: dataverse/etl/deduplication/README.md ================================================ # Deduplication > Deduplication is the process of removing duplicate records from a dataset. 
def filter_lines(row, subset="text"):
    """
    Rebuild a row's text, keeping only the lines listed in ``line_ids``.

    The pyspark ``Row`` is converted to a dict, the surviving line indices
    are applied in ascending order (preserving the original line order),
    and the helper ``line_ids`` field is dropped from the result.

    Args:
        row (Row): a pyspark row carrying the ``subset`` text and ``line_ids``
        subset (str, optional): the text column to filter. Defaults to "text".

    Returns:
        dict: the row as a dict with ``subset`` filtered and ``line_ids`` removed
    """
    record = row.asDict()
    lines = record[subset].split("\n")
    keep = sorted(record.pop("line_ids"))
    record[subset] = "\n".join(lines[i] for i in keep)
    return record
@register_etl
def deduplication___exact___column(
    spark, data: Union[RDD, DataFrame], subset: List[str] = None, *args, **kwargs
):
    """
    Exact column deduplication.

    Rows that share identical values in all ``subset`` columns are reduced
    to a single row.

    Args:
        spark (SparkSession): The Spark session object.
        data (Union[RDD, DataFrame]): The input data to be deduplicated.
        subset (List[str], optional): Subset of columns to consider for the
            duplication check. Defaults to ['text'].

    Returns:
        Deduplicated DataFrame object
    """
    # FIX: the default used to be the mutable literal ['text'], which is a
    # classic shared-default pitfall; use a None sentinel instead
    # (behavior is unchanged for all existing callers).
    if subset is None:
        subset = ["text"]

    if isinstance(data, RDD):
        data = data.toDF()
    assert isinstance(data, DataFrame), f"data must be DataFrame, got {type(data)}"

    return data.dropDuplicates(subset=subset)
def get_hash(text: str, n_bytes: int = 8):
    """
    Hash a shingle into an integer via SHA-1.

    Only the first ``n_bytes`` of the digest are kept and interpreted in the
    machine's native byte order.

    Args:
        text (str): the text to hash
        n_bytes (int): number of digest bytes to keep. Defaults to 8.

    Returns:
        int: the truncated digest as an integer

    NOTE(review): ``sys.byteorder`` makes the result platform-dependent —
    stable within one run, but not across architectures; confirm this is
    acceptable for persisted signatures.
    """
    digest = hashlib.sha1(text.encode("utf-8")).digest()
    return int.from_bytes(digest[:n_bytes], sys.byteorder)
@register_etl
def deduplication___minhash___lsh_jaccard(
    spark: SparkSession,
    data: Union[RDD, DataFrame],
    threshold: float = 0.7,
    ngram_size: int = 5,
    min_length: int = 5,
    num_perm: int = 250,
    band_n: int = None,
    row_per_band: int = None,
    id_col: Union[str, None] = None,
    subset: str = "text",
    seed: int = 42,
    duplicates_save_path: Union[str, None] = None,
    *args,
    **kwargs,
) -> RDD:
    """
    Fuzzy deduplication using MinHash and Locality Sensitive Hashing (LSH).

    Args:
        spark (SparkSession): The SparkSession object.
        data (Union[RDD, DataFrame]): Input data to be deduplicated.
        threshold (float, optional): Similarity threshold. Default is 0.7.
        ngram_size (int, optional): Size of n-grams. Default is 5.
        min_length (int, optional): Minimum token length of document to be considered.
            Default is 5.
        num_perm (int, optional): Number of permutations. Default is 250.
        band_n (int, optional): Number of bands. If not provided, it will be
            calculated based on the threshold and num_perm.
        row_per_band (int, optional): Number of rows per band. If not provided,
            it will be calculated based on the threshold and num_perm.
        id_col (str, optional): Key column for extracting duplicated rows.
            If not provided, a temporary id column will be created.
        subset (str, optional): Column to deduplicate on. Default is "text".
        seed (int, optional): Random seed. Default is 42.
        duplicates_save_path (str, optional): Save path for duplicated entries.
            If not provided, duplicates are not saved.

    Raises:
        TypeError: If `data` is neither an RDD nor a DataFrame, or the subset
            column is neither a string nor an array type.
        ValueError: If `duplicates_save_path` already exists.

    Returns:
        RDD: Deduplicated data.
    """
    spark.sparkContext.setCheckpointDir("checkpoint")
    from graphframes import GraphFrame

    if isinstance(data, RDD):
        data_df = data.toDF()
    elif isinstance(data, DataFrame):
        data_df = data
    else:
        # BUG FIX: previously fell through with data_df unbound and crashed
        # later with UnboundLocalError; fail fast with a clear message.
        raise TypeError(f"data must be RDD or DataFrame, got {type(data)}")

    if duplicates_save_path is not None and os.path.exists(duplicates_save_path):
        # BUG FIX: this used to be `assert "duplicates_save_path already exists."`,
        # which asserts a truthy string literal and therefore can never fail.
        # Raise ValueError instead, consistent with the data_save ETLs.
        raise ValueError(f"duplicates_save_path {duplicates_save_path} already exists")

    # pick internal working-column names that do not clash with the data
    temp_id_col, component_col, tokens_col, ngrams_col = \
        "__id__", "__component__", "__tokens__", "__ngrams__"
    exist_cols = set(data_df.columns)
    while True:
        if temp_id_col in exist_cols:
            temp_id_col += "_"
        elif component_col in exist_cols:
            component_col += "_"
        elif tokens_col in exist_cols:
            tokens_col += "_"
        elif ngrams_col in exist_cols:
            ngrams_col += "_"
        else:
            break

    if id_col is None:
        id_col = temp_id_col
        print(f"create temp id col: {id_col}")
        data_df = data_df.withColumn(id_col, F.monotonically_increasing_id())

    data_df.persist(pyspark.StorageLevel.DISK_ONLY)

    if band_n is None or row_per_band is None:
        band_n, row_per_band = optimal_param(threshold, num_perm)

    # BUG FIX: this was `1 << 61 - 1`, which by operator precedence is
    # `1 << 60`, not the intended Mersenne prime 2^61 - 1 (compare
    # MERSENNE_PRIME = (1 << 61) - 1 in polyglot.py and upstream text-dedup).
    mod_prime = (1 << 61) - 1
    gen = np.random.RandomState(seed)
    hash_params = (
        gen.randint(1, mod_prime, dtype=np.uint64, size=band_n * row_per_band),
        gen.randint(0, mod_prime, dtype=np.uint64, size=band_n * row_per_band),
    )

    subset_type: str = [t for c, t in data_df.dtypes if c == subset][0]
    if subset_type.startswith("str"):
        # assume the subset column still needs tokenizing
        tokens_df = RegexTokenizer(
            inputCol=subset, outputCol=tokens_col, pattern="\\W"
        ).transform(
            data_df
            .select(id_col, F.col(subset).substr(1, 10_000_000).alias(subset))
        ).select(
            id_col, tokens_col
        ).filter(
            F.size(tokens_col) >= min_length
        )
    elif subset_type.startswith("array"):
        print("already tokenized.")
        tokens_col = subset
        tokens_df = data_df.select(id_col, tokens_col)
    else:
        # fail fast instead of a later UnboundLocalError on tokens_df
        raise TypeError(f"subset column {subset} must be string or array, got {subset_type}")

    shingles_df = NGram(
        n=ngram_size, inputCol=tokens_col, outputCol=ngrams_col
    ).transform(tokens_df).select(id_col, ngrams_col)

    sig_udf = F.udf(
        functools.partial(
            get_signatures,
            band_n=band_n,
            row_per_band=row_per_band,
            mod_prime=mod_prime,
            hash_params=hash_params
        ),
        returnType=T.ArrayType(T.StringType())
    )
    # each band signature groups candidate-duplicate ids together
    signature_df = (
        shingles_df
        .select(id_col, F.explode(sig_udf(ngrams_col)).alias("band"))
        .groupby("band")
        .agg(
            F.collect_set(id_col).alias("ids")
        )
    )

    edge_udf = F.udf(
        generate_edges,
        returnType=T.ArrayType(T.ArrayType(data_df.schema[id_col].dataType))
    )
    edges_df = (
        signature_df
        .select("ids")
        .filter(F.size("ids") > 1)
        .select(F.explode(edge_udf("ids")).alias("edges"))
        .distinct()
        .selectExpr("edges[0] as src", "edges[1] as dst")
    ).persist(pyspark.StorageLevel.DISK_ONLY)

    count = edges_df.count()
    if count == 0:
        print("no entry for deduplication.")
        edges_df.unpersist()
        data_df.unpersist()
        return data

    vertices_df = (
        edges_df
        .selectExpr("src as id")
        .union(edges_df.selectExpr("dst as id"))
        .distinct()
    )
    # connected components: every component is one cluster of near-duplicates
    assignment = (
        GraphFrame(vertices_df, edges_df)
        .connectedComponents(broadcastThreshold=200 * (1024 ** 2))
    )
    join_df = data_df.join(
        assignment.select(
            F.col("id").alias(id_col),
            F.col("component").alias(component_col)
        ),
        on=id_col,
        how="left"
    )

    if duplicates_save_path is not None:
        duplicates_df = (
            join_df
            .filter(F.col(component_col).isNotNull())
            .drop(ngrams_col)
        )
        if id_col == temp_id_col:
            duplicates_df = duplicates_df.drop(id_col)
        if tokens_col != subset:
            duplicates_df = duplicates_df.drop(tokens_col)
        duplicates_df.write.parquet(duplicates_save_path)
        duplicates_df.unpersist()

    # keep all non-duplicated rows plus one representative per component
    final_df = (
        join_df
        .filter(F.col(component_col).isNull())
        .union(
            join_df
            .filter(F.col(component_col).isNotNull())
            .dropDuplicates([component_col])
        )
        .drop(component_col, ngrams_col)
    )
    if id_col == temp_id_col:
        final_df = final_df.drop(id_col)
    if tokens_col != subset:
        final_df = final_df.drop(tokens_col)

    edges_df.unpersist()
    return final_df.rdd
def jaccard_by_hashvalues(src_hashvalues, tgt_hashvalues) -> float:
    """
    Estimate Jaccard similarity from two aligned MinHash signatures.

    The estimate is the fraction of positions where the two signatures agree.

    Args:
        src_hashvalues: source MinHash signature (numpy array)
        tgt_hashvalues: target MinHash signature (numpy array)

    Returns:
        float: estimated Jaccard similarity in [0, 1]

    Raises:
        ValueError: if the two signatures have different lengths
    """
    if len(src_hashvalues) != len(tgt_hashvalues):
        raise ValueError(
            f"hashvalue lengths differ: {len(src_hashvalues)} != {len(tgt_hashvalues)}"
        )
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24,
    # so this raised AttributeError on current NumPy; the builtin `float`
    # is the documented replacement and is numerically identical here.
    matches = np.count_nonzero(src_hashvalues == tgt_hashvalues)
    return float(matches) / float(len(src_hashvalues))
sim_threshold (float, optional): The similarity threshold for deduplication. Defaults to 0.8. *args: Additional positional arguments. **kwargs: Additional keyword arguments. Returns: RDD or DataFrame: The deduplicated data. Raises: None Examples: >>> deduplication___polyglot___minhash()(spark, data, expand_size=128, sim_threshold=0.9) """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) overlap_kv_rdd: RDD = ( data.flatMap( lambda x: expand_instances_by_minhash( x, expand_size=expand_size, n_gram=n_gram, seed=seed, char_level=char_level, ) ) .reduceByKey(lambda x, y: x + y) .flatMap(lambda x: explore_dedup_instance(x[1], threshold=sim_threshold)) .distinct() .map(lambda x: (x, dict(text=x))) .cache() ) data = data.map(lambda x: (x["text"], x)).subtractByKey(overlap_kv_rdd).map(lambda x: x[1]) return data ================================================ FILE: dataverse/etl/pii/README.md ================================================ # PII (Personally Identifiable Information) > Replacing, Removing, and Anonymizing PII ## 🌌 Naming Convention > This is a strong recommendation. You can use your own naming convention if you want. ```python def cleaning___[ETL Sub-Category]___[ETL Process]() ``` - `ETL Sub-Category` - the `PII` type - e.g. card number - e.g. email - e.g. phone number - `ETL process name` - what you are doing to the `PII` - e.g. remove - e.g. replace ================================================ FILE: dataverse/etl/pii/__init__.py ================================================ ================================================ FILE: dataverse/etl/pii/card.py ================================================ """ Copyright (c) 2024-present Upstage Co., Ltd. 
@register_etl
def pii___card___replace_card_number(
    spark,
    data: Union[RDD, DataFrame],
    subset: str = "text",
    pattern: str = r"(\d{4}-\d{4}-\d{4}-\d{4})",
    random_pii: bool = True,
    replace_pii: bool = False,
    replace_token: str = "[CARD_NUMBER]",
    start_token: str = "",
    end_token: str = "",
    *args,
    **kwargs,
) -> RDD:
    r"""
    Replace card numbers with a random number or a token.

    Args:
        spark: The SparkSession object.
        data (Union[RDD, DataFrame]): The input data to process.
        subset (str, optional): The subset or column to consider. Defaults to 'text'.
        pattern (str, optional): The regex pattern to find.
            Defaults to r'(\d{4}-\d{4}-\d{4}-\d{4})'.
        random_pii (bool, optional): If True, replace the pii with a random number.
            Defaults to True.
        replace_pii (bool, optional): If True, replace the pii with the `replace_token`.
            Defaults to False.
        replace_token (str, optional): The token to replace the pii with.
            Defaults to '[CARD_NUMBER]'.
        start_token (str, optional): The start token to prepend where the pattern
            is found. Defaults to ''.
        end_token (str, optional): The end token to append where the pattern
            is found. Defaults to ''.

    Returns:
        RDD: The processed data.

    Caveats:
        - `replace_pii` takes precedence over `random_pii`
            - e.g. when both are True, the card number is replaced with the token
        - `start_token` and `end_token` wrap the match regardless of whether
          the number was randomized or replaced

    Examples:
        <input>
            - text = 'card number is 1234-1234-1234-1234.'
        <output>
            - random pii: text = 'card number is 2238-1534-1294-1274.'
            - replace pii: text = 'card number is [CARD_NUMBER].'
            - start token '[CARD_NUMBER_START]':
              text = 'card number is [CARD_NUMBER_START]1234-1234-1234-1234.'
            - end token '[CARD_NUMBER_END]':
              text = 'card number is 1234-1234-1234-1234[CARD_NUMBER_END].'
    """
    if isinstance(data, DataFrame):
        data = data.rdd
        data = data.map(lambda row: row.asDict())

    def _rewrite_match(match):
        found = match.group()
        if replace_pii:
            # token replacement wins over randomization
            found = replace_token
        elif random_pii:
            # keep the original formatting, randomize only the digits
            found = re.sub(r"\d", lambda _: str(random.randint(0, 9)), found)
        return f"{start_token}{found}{end_token}"

    def _scrub_row(row):
        row[subset] = re.sub(pattern, _rewrite_match, row[subset])
        return row

    return data.map(_scrub_row)
Defaults to 'text'. pattern(str, optional): The regex pattern to find. Defaults to r'\d{6}-\d{7}'. random_pii(str, optional): If True, replace the pii with a random number. Defaults to True. replace_pii(bool, optional): If True, replace the pii with the `replace_token`. Defaults to False. replace_token(bool, optional): The token to replace the pii with. Defaults to '[NIN]'. start_token(str, optional): The start token to append where the pattern is found. Defaults to ''. end_token(str, optional): The end token to append where the pattern is found. Defaults to ''. Returns: rdd: The processed data with replaced Korean RRN. Caveats: - `replace_pii` takes precedence over `random_pii` - `start_token` and `end_token` are used to append the token to the start and end of the number - it doens't matter with `random_pii` or `replace_pii` is True or False Examples: <input> - text = 'nin is 123456-1234567' <output> - random pii - text = 'nin is 141124-1244121' - replace pii - replace_token = '[NIN]' - text = 'nin is [NIN].' - start token - start_token = '[NIN_START]' - text = 'nin is [NIN_START]123456-1234567' - end token - end_token = '[NIN_END]' - text = 'nin is 123456-1234567[NIN_END].' """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _replace_match(match): match = match.group() if replace_pii: match = replace_token elif random_pii: match = re.sub(r"\d", lambda x: str(random.randint(0, 9)), match) return f"{start_token}{match}{end_token}" def _replace_pii(row): row[subset] = re.sub(pattern, _replace_match, row[subset]) return row data = data.map(_replace_pii) return data ================================================ FILE: dataverse/etl/pipeline.py ================================================ """ ETL Interface ---------------------- user will be interacting with this interface Copyright (c) 2024-present Upstage Co., Ltd. 
class ETLPipeline:
    """
    ETL Pipeline.

    This class represents an ETL (Extract, Transform, Load) pipeline.
    It provides methods for managing and executing ETL processes.

    Attributes:
        registry (ETLRegistry): The registry of ETL processes.

    Examples:
        >>> etl_pipeline = ETLPipeline()
        >>> etl_pipeline.status()
        >>> etl_pipeline.search('data_ingestion', 'ufl')
        >>> spark, data = etl_pipeline.sample()
        >>> config = Config.default()
        >>> etl_pipeline.run(config = config)
    """

    def __init__(self):
        # ETLRegistry is a singleton; this also triggers auto-registration
        # of all ETLs on first construction.
        self.registry = ETLRegistry()

    def __len__(self):
        return len(self.registry)

    def status(self):
        """
        Get the status of the registry.

        Returns:
            str: The status of the registry.

        Raises:
            None

        Examples:
            >>> etl_pipeline = EtlPipeline()
            >>> etl_pipeline.status()
            'If you need details of ETL Registry use `etl_pipeline.search()`'

        Note:
            This method does not show detailed information.
            It will only info about category .
        """
        print("If you need details of ETL Registry use `etl_pipeline.search()`")
        return str(self.registry)

    def search(self, category=None, sub_category=None):
        """
        Get detailed status of the registry by searching.
        This function lets you know category, sub_category, and etl_name.

        Args:
            category (str, optional): The category to filter the search results.
                Defaults to None.
            sub_category (str, optional): The sub-category to filter the search results.
                Defaults to None.

        Returns:
            list: A list of search results matching the specified category and sub-category.

        Examples:
            Return every ETL
            >>> etl_pipeline.search()

            Only selected category
            >>> etl_pipeline.search('data_ingestion')
            >>> etl_pipeline.search(category='data_ingestion')

            Only selected category & sub_category
            >>> etl_pipeline.search('data_ingestion', 'ufl')
            >>> etl_pipeline.search(category='data_ingestion', sub_category='ufl')
        """
        return self.registry.search(category=category, sub_category=sub_category)

    def get(self, key):
        """get ETL class from registry"""
        return self.registry.get(key=key)

    def setup_spark_conf(self, config, verbose=False):
        """
        Build a SparkConf from the Dataverse config.

        AWS credential setting log is not influenced by the verbose by design.

        Args:
            config (OmegaConf): The loaded Dataverse config (spark.* keys are read).
            verbose (bool): Passed through to the AWS credential check only.

        Returns:
            SparkConf: The populated Spark configuration.
        """
        # TODO: add more spark configurations
        spark_conf = SparkConf()
        spark_conf.set("spark.master", config.spark.master)
        spark_conf.set("spark.app.name", config.spark.appname)
        spark_conf.set("spark.driver.memory", config.spark.driver.memory)
        spark_conf.set("spark.driver.maxResultSize", config.spark.driver.maxResultSize)
        spark_conf.set("spark.executor.memory", config.spark.executor.memory)
        spark_conf.set("spark.local.dir", config.spark.local.dir)
        spark_conf.set("spark.ui.port", config.spark.ui.port)
        spark_conf.set("spark.jars.packages", "graphframes:graphframes:0.8.3-spark3.5-s_2.12")

        # AWS S3 Support
        if aws_check_credentials(verbose=verbose):
            session = boto3.Session()
            credentials = session.get_credentials()

            spark_conf.set("spark.hadoop.fs.s3a.access.key", credentials.access_key)
            spark_conf.set("spark.hadoop.fs.s3a.secret.key", credentials.secret_key)
            spark_conf.set("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")

            # BUG FIX: `spark.jars.packages` is a single comma-separated list and
            # SparkConf.set() *replaces* the previous value, so setting it again
            # here used to silently drop the graphframes package configured above.
            # Append the AWS packages to whatever is already set instead.
            hadoop_ver = SystemSetting().get("HADOOP_VERSION")
            aws_packages = (
                f"org.apache.hadoop:hadoop-aws:{hadoop_ver}"
                f",com.amazonaws:aws-java-sdk-bundle:1.12.592"
            )
            existing_packages = spark_conf.get("spark.jars.packages", None)
            if existing_packages:
                spark_conf.set("spark.jars.packages", f"{existing_packages},{aws_packages}")
            else:
                spark_conf.set("spark.jars.packages", aws_packages)

            # check if the credentials are temporary or not
            try:
                spark_conf.set("spark.hadoop.fs.s3a.session.token", credentials.token)
                spark_conf.set(
                    "spark.hadoop.fs.s3a.aws.credentials.provider",
                    "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider",
                )
                # this is for temporary credentials
                print("spark conf is set with [ temporary ] S3 credentials")
            except Exception:
                print("spark conf is set with [ permanent ] S3 credentials")
        else:
            print("[ No AWS Credentials Found] - Failed to set spark conf for S3")

        return spark_conf

    def sample(
        self,
        n=100,
        config=None,
        sample_etl="data_ingestion___test___generate_fake_ufl",
        verbose=False,
    ):
        """
        Get the spark session and sample data.
        Use this function to test the ETL pipeline quickly without config.

        Args:
            n (int): The number of data to generate. Default is 100.
            config (Union[str, dict, OmegaConf]): Config for the ETL. Default is None.
            sample_etl (str): The name of the sample ETL process.
                Default is "data_ingestion___test___generate_fake_ufl".
            verbose (bool): If True, print the status. Default is False.

        Returns:
            Tuple[SparkSession, DataFrame]: The Spark session and the sampled data.
        """
        if config is None:
            config = Config.default()
        else:
            config = Config.load(config)
            config = Config.set_default(config)

        # remove all the ETL processes and keep only the sample generator
        config.etl = []
        config.etl.append({"name": sample_etl, "args": {"n": n}})

        if verbose:
            print("=" * 50)
            print("[ Configuration ]")
            print(OmegaConf.to_yaml(config))
            print("=" * 50)

        spark_conf = self.setup_spark_conf(config, verbose=verbose)
        spark = SparkSession.builder.config(conf=spark_conf).getOrCreate()

        if verbose:
            print("=" * 50)
            print("[ Spark Final Configuration ]")
            print(OmegaConf.to_yaml(spark_conf.getAll()))
            print("=" * 50)

        sample_etl_class = self.get(key=sample_etl)
        data = sample_etl_class()(spark, n=n, etl_name=sample_etl)

        if verbose:
            print(
                (
                    f"{'=' * 50}\n"
                    "[ SAMPLE MODE ]\n"
                    f"{'=' * 50}\n"
                    "This is a quick way to get the sample data for testing or debugging w/o config.\n"
                    "If you want to test the ETL pipeline with your own data, please use `run` w/ config.\n"
                    f"{'=' * 50}\n"
                    "=> spark, data = etl_pipeline.sample()\n"
                    "=> data = data.map(add awesome duck to column)\n"
                    f"{'=' * 50}\n"
                )
            )

        return spark, data

    def run(
        self,
        config: Union[str, dict, DictConfig, OmegaConf, Path],
        verbose=False,
        cache=False,
        emr=False,
        *args,
        **kwargs,
    ):
        """
        Runs the ETL process.

        Args:
            config (Union[str, dict, OmegaConf]): config for the etl
                - str: path to the config file
                - dict: config dict
                - OmegaConf: config object
            verbose (bool): if True, print the status of the etl pipeline
                - the verbose will be applied to the ETL process as well
                - ETL process `verbose` takes precedence over this
            cache (bool): cache every stage of the ETL process
            emr (bool): if True, run the ETL process on EMR
        """
        # ================ [ EMR ] ===================
        if emr:
            return self.run_emr(
                config,
                verbose=verbose,
                cache=cache,
                *args,
                **kwargs,
            )

        # =============== [ Set Config ] ==================
        # mainly this is to fill the missing config args with default
        config = Config.load(config)
        config = Config.set_default(config)

        if verbose:
            print("=" * 50)
            print("[ Configuration ]")
            print(OmegaConf.to_yaml(config))
            print("=" * 50)

        # ================ [ Set Spark ] ===================
        spark_conf = self.setup_spark_conf(config, verbose=verbose)
        spark = SparkSession.builder.config(conf=spark_conf).getOrCreate()

        if verbose:
            print("=" * 50)
            print("[ Spark Final Configuration ]")
            print(OmegaConf.to_yaml(spark_conf.getAll()))
            print("=" * 50)

        # ================= [ Run ETL ] ====================
        # [ Load RDD/DataFrame ] - data ingestion
        # [ Preprocessing ]
        # [ Save RDD/DataFrame ] - data save
        etl_configs = config.etl
        total_etl_n = len(etl_configs)

        # [switch] is the ETL process ended or not
        # if not, spark session & data will be returned to continue
        IS_ETL_FINISHED = True

        data = None
        prev_etl_name = None
        prev_data = None  # for caching

        for etl_i, etl_config in enumerate(etl_configs):
            # etl_config.name format
            # =====>[ etl_cate___etl_sub_cate___etl_name ]
            etl_name = etl_config.name
            etl_category = etl_name.split("___")[0]
            etl_class = self.get(key=etl_name)

            # instantiate etl class
            etl_instance = etl_class()

            # this is middle creator mode
            # if the last ETL process is not data save
            if etl_i == total_etl_n - 1 and etl_category != "data_save":
                if verbose:
                    print(
                        (
                            f"{'=' * 50}\n"
                            "[ DEBUG MODE ]\n"
                            f"{'=' * 50}\n"
                            f"Last ETL process was assigned for [ {etl_category} ]\n"
                            "Spark session will not be stopped and will be returned\n"
                            "If this is not intended, please assign [ data_save ] at the end.\n"
                            f"{'=' * 50}\n"
                            "Example:\n"
                            "=> spark, data = etl_pipeline.run(config)\n"
                            "=> data = data.map(add awesome duck to column)\n"
                            f"{'=' * 50}\n"
                        )
                    )
                IS_ETL_FINISHED = False

            # when args is not defined, set it to empty dict
            if "args" in etl_config:
                args = etl_config.args
            else:
                args = {}

            # if verbose is not defined, set it same to the pipeline
            if "verbose" not in args:
                args["verbose"] = verbose

            # `etl_name` is passed to args for tracking
            if etl_i == 0:
                data = etl_instance(spark, **args, etl_name=etl_name, prev_etl_name=None)
            else:
                data = etl_instance(
                    spark, data, **args, etl_name=etl_name, prev_etl_name=prev_etl_name
                )

            # cache the data; release the previous stage's cache first
            if cache:
                if prev_data is not None:
                    prev_data.unpersist()
                data.cache()
                prev_data = data

            prev_etl_name = etl_name

        # =============== [ Stop Spark ] ==================
        if IS_ETL_FINISHED:
            spark.stop()

            if verbose:
                print("=" * 50)
                print("[ Spark Successfully Done ]")
                print("=" * 50)

        return spark, data

    def run_emr(
        self,
        config: Union[str, dict, DictConfig, OmegaConf, Path],
        verbose=False,
        cache=False,
        *args,
        **kwargs,
    ):
        """
        Runs the ETL process on an EMR cluster.

        Args:
            config (Union[str, dict, OmegaConf]): config for the etl
                - str: path to the config file
                - dict: config dict
                - OmegaConf: config object
            verbose (bool): if True, print the status of the etl pipeline
                - the verbose will be applied to the ETL process as well
                - ETL process `verbose` takes precedence over this
            cache (bool): cache every stage of the ETL process

        Returns:
            None, Config:
                - None for spark session
                - Config for the config
                - originally data is returned, but it is not necessary for EMR
        """
        if not aws_check_credentials(verbose=verbose):
            raise ValueError("AWS EMR requires AWS credentials")

        # =============== [ Set Config ] ==================
        config = Config.load(config)
        config = Config.set_default(config, emr=True)

        # EMR resource manager - yarn
        config.spark.master = "yarn"

        # reset local_dir for EMR cluster
        config.spark.local.dir = "/tmp"

        # ================ [ EMR ] ===================
        # NOTE: config will be auto-updated by EMR Manager
        emr_manager = EMRManager()
        try:
            # EMR cluster launch
            emr_manager.launch(config)

            if verbose:
                print("=" * 50)
                print("[ Configuration ]")
                print(OmegaConf.to_yaml(config))
                print("=" * 50)

            # EMR cluster environment setup & run spark
            step_id = emr_manager.run(config, verbose=verbose)

            # wait until EMR cluster step is done
            emr_manager.wait(config, step_id)

            # EMR Cluster terminate
            # XXX: after EMR cluster is terminated, and confirmed by waiter
            # there is still a chance that the cluster is not terminated and cause error
            # - DependencyViolation (which depends on terminated cluster)
            # FIXME: this is a temporary solution, need to find a better way to handle this
            RETRY_TERMINATE = 5
            for _ in range(RETRY_TERMINATE):
                try:
                    emr_manager.terminate(config)
                    break
                except AWSClient().ec2.exceptions.ClientError as e:
                    if e.response["Error"]["Code"] == "DependencyViolation":
                        print("DependencyViolation - retrying to terminate EMR cluster")
                        time.sleep(5)
                    else:
                        raise e
                except Exception as e:
                    raise e

        # ctrl + c: always try to tear the cluster down before propagating
        except KeyboardInterrupt:
            print("KeyboardInterrupt - terminating EMR cluster")
            emr_manager.terminate(config)
            raise KeyboardInterrupt
        except Exception as e:
            print("Exception - terminating EMR cluster")
            emr_manager.terminate(config)
            raise e

        return None, config
def language_predict_fasttext(row, model, top_k: int = 1, score_rounding: int = 2):
    """
    Classify the language of a single row dict with a fasttext model.

    Args:
        row (dict): Row with a 'text' key; mutated in place.
        model: A loaded fasttext model exposing `predict(text, k=...)`.
        top_k (int): How many top language predictions to keep.
        score_rounding (int): Decimal places to round the scores to.

    Returns:
        dict: The same row, with 'labels' (list of language codes, the
        '__label__' prefix stripped) and 'scores' (rounded) added.
    """
    # fasttext's predict() rejects newlines, so collapse them first
    text = row["text"].replace("\n", "")
    labels, scores = model.predict(text, k=top_k)
    labels = [label.replace("__label__", "") for label in labels]
    row["labels"] = labels
    row["scores"] = scores.round(score_rounding)
    return row


def language_predict_fasttext_by_partition(rows, top_k: int = 1, score_rounding: int = 2):
    """
    Generator: classify every row of a partition, loading the model once
    per partition (fasttext models are not serializable, so they cannot be
    shipped in the closure).
    """
    # loaded for every partition
    model = load_fasttext()

    # FIXME: not possible to use multiprocessing here because of the model is not serializable
    # pool = multiprocessing.Pool(processes = os.cpu_count() or 0)
    # results = pool.imap(
    #     functools.partial(language_predict_fasttext, model=model, top_k=top_k),
    #     rows,
    # )

    for row in rows:
        # BUG FIX: `score_rounding` was accepted but never forwarded, so the
        # caller-specified rounding was silently ignored (always the default 2)
        yield language_predict_fasttext(row, model, top_k=top_k, score_rounding=score_rounding)


@register_etl
def quality___language___fasttext_filter(
    spark,
    data: Union[RDD, DataFrame],
    subset: str = "text",
    top_k: int = 1,
    score_rounding: int = 2,
    threshold: float = 0.0,
    whitelist: List[str] = None,
    blacklist: List[str] = None,
    *args,
    **kwargs,
) -> RDD:
    """
    Filters data based on language using fasttext.
    If language score is below threshold, that row will be filtered.

    Args:
        spark (SparkSession): The Spark session object.
        data (Union[RDD, DataFrame]): The input data to be processed.
        subset (str, optional): A subset or column to consider. Defaults to 'text'.
        top_k(int, optional): The number of top languages to keep after classification.
            Defaults to 1.
            - if fasttext classified 3 languages, top_k=1 will keep the top language
                - [en, fr, de] -> [en]
            - if fasttext classified 3 languages, top_k=2 will keep the top 2 languages
                - [en, fr, de] -> [en, fr]
        score_rounding(int, optional): The number of decimal places to round the scores.
            Defaults to 2.
        threshold(float, optional): The minimum score to keep the language. Defaults to 0.0.
        whitelist(List[str], optional): The list of languages to keep. Defaults to None.
        blacklist(List[str], optional): The list of languages to remove. Defaults to None.

    Raises:
        ValueError: If both whitelist and blacklist are not None.

    Returns:
        rdd: The filtered data.

    Caveats about `whitelist` and `blacklist`:
        - [Default] If both `whitelist` and `blacklist` are None, all languages will be kept.
        - If both `whitelist` and `blacklist` are not None, an error will be raised.
        - If `whitelist` is not None, only the languages in the `whitelist` will be kept.
        - If `blacklist` is not None, the languages in the `blacklist` will be removed.
    """
    if isinstance(data, DataFrame):
        data = data.rdd
        data = data.map(lambda row: row.asDict())

    # detect language using fasttext
    data = data.mapPartitions(
        functools.partial(
            language_predict_fasttext_by_partition,
            top_k=top_k,
            score_rounding=score_rounding,
        )
    )

    # filter by threshold - keep a row if any of its top_k scores passes
    data = data.filter(lambda x: any(s >= threshold for s in x["scores"][:top_k]))

    # filter by whitelist and blacklist
    if whitelist is not None and blacklist is not None:
        raise ValueError("whitelist and blacklist cannot be both not None")
    elif whitelist is not None:
        data = data.filter(lambda x: any(label in whitelist for label in x["labels"][:top_k]))
    elif blacklist is not None:
        data = data.filter(lambda x: all(label not in blacklist for label in x["labels"][:top_k]))
    else:
        # otherwise, keep all languages
        ...

    # remove the temporary labels and scores columns before returning
    data = data.map(lambda x: {k: v for k, v in x.items() if k != "labels" and k != "scores"})

    return data
# remove labels and scores data = data.map(lambda x: {k: v for k, v in x.items() if k != "labels" and k != "scores"}) return data ================================================ FILE: dataverse/etl/registry.py ================================================ """ Base class to support the registration of the ETL classes Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ import abc import importlib.util import inspect import os from functools import wraps from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.utils.setting import SystemSetting # TODO: If you add category directories, add them here too # _sample is a special directory that is not imported # This is where you choose what categories to register ETL_CATEGORIES = [ "data_ingestion", "decontamination", "deduplication", "bias", "toxicity", "cleaning", "pii", "quality", "data_save", "utils", ] IGNORE_FILES = [ "__init__.py", ] def auto_register(etl_categories=ETL_CATEGORIES): """ This will automatically register all ETLs to the registry """ etl_path = os.path.dirname(os.path.abspath(__file__)) for etl_category in etl_categories: # Get the files(sub-categories) in the category category_path = os.path.join(etl_path, etl_category) files = os.listdir(category_path) # Filter out non-Python files files = [f for f in files if f.endswith(".py")] # Dynamically import all Python files in the directory for file in files: if file in IGNORE_FILES: continue file_path = os.path.join(category_path, file) # Remove .py at the end module_name = file[:-3] spec = importlib.util.spec_from_file_location(module_name, file_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) # To avoid circular dependency class ETLStructure: ... class ETLRegistry: """Singleton class to register the ETL classes. This class provides a registry for ETL classes. 
    # Guards __init__ so the singleton body runs only once (see __init__).
    _initialized = False

    def __new__(cls):
        # Singleton: every construction returns the one shared instance.
        if not hasattr(cls, "instance"):
            cls.instance = super(ETLRegistry, cls).__new__(cls)
        return cls.instance

    def __init__(self):
        """
        when the class is initialized, this is called everytime
        regardless of the singleton. So adding the flag to check
        """
        if self._initialized:
            return
        self._initialized = True
        self._registry = {}
        self._status = {}

        # populate the registry by importing all ETL modules
        auto_register()

    def __len__(self):
        return len(self._registry.keys())

    def __repr__(self):
        # summary view only (no sub-category / ETL names)
        return self._convert_to_report_format(self._status)

    def __str__(self):
        return self.__repr__()

    def reset(self):
        """
        reset the registry

        NOTE(review): `_status` is not cleared here, so `repr`/`search`
        may still report entries that were removed from `_registry` —
        confirm whether that is intended.
        """
        self._registry = {}

    def register(self, key: str, etl: ETLStructure):
        """
        Registers the ETL (Extract, Transform, Load) process.

        Args:
            key (str): The key used to identify the ETL process.
                Should be in the format below:
            etl (ETLStructure): The ETL process to be registered.
                It should be a subclass of ETLStructure.

        Raises:
            ValueError: If the key is not all lowercase, not separated by '___',
                or does not have 2 layers of category.
            TypeError: If the ETL class is not a subclass of ETLStructure.
            KeyError: If the key is already registered.

        Note:
            - The key should be in the format of:
                - all lowercase
                - separated by ___
                - it should have 2 layers of category
            - Example: <etl_type>___<file_key>___<etl_key>
                or <category>___<sub_category>___<etl_key>.
        """
        if not key.islower():
            raise ValueError(f"The key [ {key} ] should be all lowercase")
        if "___" not in key:
            raise ValueError(f"The key [ {key} ] should be separated by ___")
        if len(key.split("___")) != 3:
            raise ValueError(f"The key [ {key} ] should have 2 layers of category")

        # all the etl should be the subclass of ETLStructure
        if not issubclass(etl, ETLStructure):
            raise TypeError(f"ETL class should be subclass of ETLStructure not {etl}")

        # register
        if key in self._registry:
            # duplicate keys are tolerated in test / doc-build mode so that
            # repeated module imports do not abort the run
            # NOTE(review): the sentinel values differ in case
            # ("True" vs "true") — confirm both setters use the matching case
            if (os.getenv("DATAVERSE_TEST_MODE") == "True") or (
                os.getenv("DATAVERSE_BUILD_DOC") == "true"
            ):
                pass
            else:
                raise KeyError(f"The key [ {key} ] is already registered")
        else:
            self._registry[key] = etl
            self._update_status(key=key)

    def _update_status(self, key: str):
        # maintain the category -> sub_category -> [keys] index used by search
        category, sub_category, _ = key.split("___")
        if category not in self._status:
            self._status[category] = {}
        if sub_category not in self._status[category]:
            self._status[category][sub_category] = [key]
        else:
            self._status[category][sub_category].append(key)

    def search(self, category: str = None, sub_category: str = None):
        """
        Search the ETL.

        Args:
            category (str, optional): The category to search for. Defaults to None.
            sub_category (str, optional): The sub-category to search for. Defaults to None.

        Returns:
            dict: A dictionary containing the filtered status information.

        Raises:
            AssertionError: If category is a list or not a string.
            AssertionError: If sub_category is a list or not a string.
            ValueError: If sub_category is specified without category.

        Note:
            - Printing all the information is fixed as default.
            - Set print_sub_category to True to print the sub-category.
            - Set print_etl_name to True to print the ETL name.
            - NOTE(review): validation uses `assert`, which is stripped
              under `python -O` — consider raising TypeError instead.
        """
        status = self._status
        filtered_status = {}
        if category is not None:
            assert type(category) != list, "we do not support list search for category"
            assert type(category) == str, "category must be a string"
            if sub_category is None:
                filtered_status[category] = status[category]
            else:
                assert type(sub_category) != list, "we do not support list search for sub-category"
                assert type(sub_category) == str, "sub_category must be a string"
                filtered_status[category] = {sub_category: status[category][sub_category]}
        else:
            if sub_category is not None:
                raise ValueError("sub-category cannot be specified without category")
            filtered_status = status

        return self._convert_to_report_format(
            filtered_status,
            print_sub_category=True,
            print_etl_name=True,
        )

    def _convert_to_report_format(
        self,
        status,
        print_sub_category=False,
        print_etl_name=False,
    ):
        """
        convert status to report format

        This includes the number of ETLs in each category and sub-category
        and depending on the options, it can include the name of the ETLs

        Args:
            status (dict): the status from `search`
            print_sub_category (bool): include per-sub-category counts
            print_etl_name (bool): include the individual ETL names

        Returns:
            str: the formatted multi-line report
        """
        # count the number of etls per category / sub-category
        stats = {}
        total = 0
        categories = list(status.keys())
        for category in categories:
            if category not in stats:
                stats[category] = {}
                stats[category]["__total__"] = 0
            sub_categories = list(status[category].keys())
            for sub_category in sub_categories:
                sub_n = len(status[category][sub_category])
                stats[category][sub_category] = sub_n
                stats[category]["__total__"] += sub_n
                total += sub_n

        # convert to the report format
        infos = []
        infos.append("=" * 50)
        infos.append(f"Total [ {total} ]")
        infos.append("=" * 50)
        for category in categories:
            infos.append(f"{category} [ {stats[category]['__total__']} ]")
            sub_categories = list(status[category].keys())
            if print_sub_category:
                for sub_category in sub_categories:
                    infos.append(f"{' ' * 4}- {sub_category} [ {stats[category][sub_category]} ]")
                    if print_etl_name:
                        for etl in status[category][sub_category]:
                            infos.append(f"{' ' * 8}- {etl}")

        return "\n".join(infos)
key: str) -> ETLStructure: """ Retrieves the ETLStructure associated with the given key. Args: key (str): The key used to retrieve the ETLStructure. Should be in the format below. Returns: ETLStructure: The ETLStructure associated with the given key. Raises: ValueError: If the key is not all lowercase, not separated by '___', or does not have 2 layers of category. KeyError: If the key is not registered in the registry. Note: - The key should be in the format of: - all lowercase - separated by ___ - it should have 2 layers of category - Example: <etl_type>___<file_key>___<etl_key> or <category>___<sub_category>___<etl_key>. """ if not key.islower(): raise ValueError(f"The key [ {key} ] should be all lowercase") if "___" not in key: raise ValueError(f"The key [ {key} ] should be separated by ___") if len(key.split("___")) != 3: raise ValueError(f"The key [ {key} ] should have 2 layers of category") if key not in self._registry: raise KeyError(f"The key {key} is not registered") return self._registry[key] def get_all(self): """ get all the etls Returns: list: list of all registered etls """ return list(self._registry.values()) class ETLAutoRegistry(abc.ABCMeta, type): def __new__(cls, name, bases, attrs): """ Metaclass to register the ETL classes automatically to the registry """ # singleton registry new_class = super().__new__(cls, name, bases, attrs) # BaseETL is base class and should not be registered # Another reason is BaseETL is not initialized yet before `__new__` is done but # the registry will verify the class is subclass of BaseETL and raise error # because BaseETL is not initialized yet :) if name != "BaseETL": if "__file_path__" not in attrs: raise TypeError( "Direct inheritance from BaseETL not allowed. Use @register_etl decorator." ) registry = ETLRegistry() registry.register(key=name, etl=new_class) return new_class class BaseETL(ETLStructure, metaclass=ETLAutoRegistry): """ Base class for spark ETL. 
def add_self(func):
    """
    Decorator to add self to the function.

    Intent is to make a plain function usable as an instance method: the
    wrapper accepts (and discards) the implicit `self` argument and forwards
    everything else to the original function.

    Args:
        func (callable): The function to adapt into a method.

    Returns:
        callable: A method-compatible wrapper around `func`.
    """

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        return func(*args, **kwargs)

    # NOTE: @wraps already copies __doc__ (and __name__, __module__, etc.)
    # from `func`, so the previous explicit `wrapper.__doc__ = func.__doc__`
    # assignment was redundant and has been removed.
    return wrapper


def register_etl(func):
    """
    Decorator to register a function as an ETL.

    Args:
        func (callable): The function to be registered as an ETL.

    Returns:
        type: A dynamically created class that inherits from BaseETL
            and wraps the original function.

    Raises:
        None.

    About Attributes:
        - __file_path__ (str): The file path of the function where it is defined.
        - __etl_dir__ (bool): If the file is in the etl directory. If not,
          it means it's a dynamically registered user-defined ETL.

    Example:
        >>> @register_etl
        >>> def my_etl_function():
        >>>     pass

    Note:
        The registered ETL function should not rely on the `self` parameter.
        If you need to use `self`, directly inherit the BaseETL class.
    """
    ETL_DIR = os.path.join(SystemSetting().DATAVERSE_HOME, "etl")
    etl_file_path = inspect.getfile(func)

    # I know using class name without snake case is awkward
    # but I want to keep the class name as it is and user won't know it.
    # Creating the class triggers registration via the ETLAutoRegistry metaclass.
    etl_cls = type(
        func.__name__,
        (BaseETL,),
        {
            "run": add_self(func),
            "__file_path__": etl_file_path,
            "__etl_dir__": etl_file_path.startswith(ETL_DIR),
        },
    )
    # type() does not propagate the function docstring to the class
    etl_cls.__doc__ = func.__doc__
    etl_cls.__is_etl__ = True

    return etl_cls
Nothing is changed. """ total_data = data.count() print("=" * 50) print(f"After [ {prev_etl_name} ] - Total data: {total_data}") print("=" * 50) return data ================================================ FILE: dataverse/etl/utils/sampling.py ================================================ """ Sampling module for data ingestion Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license """ from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl import register_etl @register_etl def utils___sampling___random( spark, data: Union[RDD, DataFrame], replace: bool = False, sample_n_or_frac: float = 0.1, seed: int = 42, *args, **kwargs ) -> RDD: """ Randomly sample the input RDD. Args: spark (SparkSession): The Spark session object. data (Union[RDD, DataFrame]): The input data to be sampled. replace (bool, optional): Whether to sample with replacement. Defaults to False. sample_n_or_frac (float, optional): Number of samples to take or fraction of the RDD to sample. Defaults to 0.1 seed (int, optional): Seed for the random number generator. Defaults to 42. Returns: RDD: Sampled RDD """ if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) if isinstance(sample_n_or_frac, float): data = data.sample(replace, sample_n_or_frac, seed) # XXX: Take too long, 1M sample takes over 10 mins and didn't finish elif isinstance(sample_n_or_frac, int): data = data.takeSample(replace, sample_n_or_frac, seed) return data ================================================ FILE: dataverse/etl/utils/statistics.py ================================================ """ Copyright (c) 2024-present Upstage Co., Ltd. 
Apache-2.0 license """ from operator import add from typing import Union from pyspark.rdd import RDD from pyspark.sql import DataFrame from dataverse.etl import register_etl @register_etl def utils___statistics___korean_nouns( spark, data: Union[RDD, DataFrame], subset: str = "text", *args, **kwargs ) -> RDD: """ Get the frequency of each noun in the given subset of the data. Args: spark: The SparkSession object. data: The data to extract the nouns from. subset: The subset of the data to extract the nouns from. Defaults to 'text'. Returns: RDD[List[Tuple[str, int]]]: The frequency of each noun in the given subset of the data. Raises: ImportError: If konlpy or Mecab is not installed. Examples: >>> data = [ ... {'text': '오리는 꽥꽥 웁니다. 거위는'}, ... {'text': '안녕 세상!'}, ... {'text': '사람들은 꽥꽥 울지 않습니다. 오리가 웁니다'}, ... ] >>> result = utils___statistics___korean_nouns()(spark, data, subset='text') >>> result.collect() [('오리', 2), ('거위', 1), ('세상', 1), ('사람', 1)] Caveats: - This function works for Korean text only. - The function returns the frequency of each noun, not the unique noun list. """ # konlpy & mecab try: from konlpy.tag import Mecab except ImportError: raise ImportError( "Please install konlpy & Mecab:\n" "pip install konlpy\n" "pip install mecab-python3\n" ) if isinstance(data, DataFrame): data = data.rdd mecab = Mecab() def _parse_korean_nouns(text): try: if text is not None: return mecab.nouns(text) else: return [] except Exception: # Log the exception for debugging purposes return [] # Count the frequency of each noun data = data.flatMap(lambda x: _parse_korean_nouns(x[subset])) noun_counts = data.map(lambda noun: (noun, 1)).reduceByKey(add) return noun_counts ================================================ FILE: dataverse/lab/README.md ================================================ # Lab > Space Laboratory for data analysis This will be further supported. 
- Data Exploration - Data Visualization - ETC ================================================ FILE: dataverse/lab/__init__.py ================================================ ================================================ FILE: dataverse/tests/conftest.py ================================================ import os import sys import pytest sys.path.append("./") # to find etl folder as module @pytest.fixture(scope="session", autouse=True) def set_test_mode_env(): old_value = os.getenv("DATAVERSE_TEST_MODE") # Activate test mode os.environ["DATAVERSE_TEST_MODE"] = "True" # Bring back to previous value yield if old_value is None: del os.environ["DATAVERSE_TEST_MODE"] else: os.environ["DATAVERSE_TEST_MODE"] = old_value ================================================ FILE: dataverse/tests/test_cleaning_accent.py ================================================ from omegaconf import OmegaConf from dataverse.etl import ETLPipeline, register_etl @register_etl def helper___test___generate_accent(spark, *args, **kwargs): data = [("café",), ("résumé",), ("piñata",)] df = spark.createDataFrame(data, ["text"]) return df def test_cleaning___accent____remove(): from etl.cleaning.accent import cleaning___accent___remove # noqa: F401 etl_pipeline = ETLPipeline() ETL_config = OmegaConf.create( { "spark": { "appname": "TEST-cleaning-accent", "driver": {"memory": "4g"}, "args": { "verbose": True, }, }, "etl": [ {"name": "helper___test___generate_accent"}, {"name": "cleaning___accent___remove"}, ], } ) spark, result = etl_pipeline.run(ETL_config) result_df = result.toDF() expected_data = [("cafe",), ("resume",), ("pinata",)] expected_df = spark.createDataFrame(expected_data, ["text"]) assert expected_df.collect() == result_df.collect() ================================================ FILE: dataverse/tests/test_cleaning_char.py ================================================ import random import re from typing import Union from omegaconf import OmegaConf from pyspark.rdd import RDD 
from pyspark.sql import DataFrame from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl faker_seed = 42 unprintable_chars = list(map(chr, range(32))) + list(map(chr, range(127, 160))) @register_etl def helper___test___generate_whitespace( spark, data: Union[RDD, DataFrame], subset="text", *args, **kwargs ): if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _generate_whitespace(row): row[subset] = row[subset].replace(" ", " " * random.randint(1, 5)) row[subset] = " " * random.randint(0, 5) + row[subset] + " " * random.randint(0, 5) return row data = data.map(_generate_whitespace) return data def test_cleaning___char___normalize_whitespace(): from etl.cleaning.char import cleaning___char___normalize_whitespace # noqa: F401 from etl.data_ingestion.test import ( # noqa: F401 data_ingestion___test___generate_fake_ufl, ) etl_pipeline = ETLPipeline() ETL_config = OmegaConf.create( { "spark": { "appname": "Test-cleaning-char", "driver": {"memory": "16g"}, "verbose": True, }, "etl": [ {"name": "data_ingestion___test___generate_fake_ufl"}, { "name": "helper___test___generate_whitespace", "args": {"subset": "text"}, }, {"name": "cleaning___char___normalize_whitespace"}, ], } ) _, result = etl_pipeline.run(ETL_config) double_space_pattern = re.compile(r"[\s\r\n]{2,}") for row in result.collect(): assert row["text"] == row["text"].strip() assert re.findall(double_space_pattern, row["text"]) == [] @register_etl def helper___test___generate_unprintable( spark, data: Union[RDD, DataFrame], subset="text", *args, **kwargs ): if isinstance(data, DataFrame): data = data.rdd data = data.map(lambda row: row.asDict()) def _insert_unprintable_chars(text): unprintable_chars = list(map(chr, range(32))) + list(map(chr, range(127, 160))) for _ in range(random.randint(1, 20)): position = random.randint(0, len(text) - 1) char = random.choice(unprintable_chars) text = text[:position] + char + text[position:] return text 
def test_cleaning___char___remove_unprintable():
    """Verify the cleaning step strips every unprintable character.

    First runs the pipeline without the cleaner to confirm the helper really
    injected unprintable characters, then runs it with the cleaner and checks
    that none survive.
    """
    from etl.cleaning.char import cleaning___char___remove_unprintable  # noqa: F401
    from etl.data_ingestion.test import (  # noqa: F401
        data_ingestion___test___generate_fake_ufl,
    )

    etl_pipeline = ETLPipeline()
    ETL_config = OmegaConf.create(
        {
            "spark": {
                "appname": "Test-cleaning-char",
                "driver": {"memory": "16g"},
                "verbose": True,
            },
            "etl": [
                {
                    "name": "data_ingestion___test___generate_fake_ufl",
                    "args": {"faker_seed": faker_seed},
                },
                {
                    "name": "helper___test___generate_unprintable",
                    "args": {"subset": "text"},
                },
            ],
        }
    )
    _, unprintable_check = etl_pipeline.run(ETL_config)

    # Sanity check: the helper must have injected at least one unprintable char.
    for row in unprintable_check.collect():
        assert any(ch in row["text"] for ch in unprintable_chars)

    etl_pipeline = ETLPipeline()
    ETL_config = OmegaConf.create(
        {
            "spark": {
                "appname": "Test-cleaning-char",
                "driver": {"memory": "16g"},
                "verbose": True,
            },
            "etl": [
                {
                    "name": "data_ingestion___test___generate_fake_ufl",
                    "args": {"faker_seed": faker_seed},
                },
                {
                    "name": "helper___test___generate_unprintable",
                    "args": {"subset": "text"},
                },
                {
                    "name": "cleaning___char___remove_unprintable",
                    "args": {"subset": "text"},
                },
            ],
        }
    )
    _, result = etl_pipeline.run(ETL_config)

    # BUG FIX: the original used `any(ch not in ...)`, which passes as soon as
    # a single unprintable char is absent. The cleaner must remove ALL of them.
    for row in result.collect():
        assert all(ch not in row["text"] for ch in unprintable_chars)
@register_etl
def helper___test___generate_html(
    spark,
    n=100,
    repartition=20,
    faker_seed=None,
    random_seed=None,
    verbose=True,
    *args,
    **kwargs,
):
    """Generate `n` rows of fake HTML documents for HTML-cleaning tests.

    Args:
        spark (SparkSession): spark session
        n (int): number of rows to generate
        repartition (int): number of partitions for the resulting RDD
        faker_seed (int, optional): seed for the faker library. Defaults to None.
        random_seed (int, optional): seed for the random library. Defaults to None.
        verbose (bool): unused here; kept for ETL-interface consistency

    Returns:
        RDD of dicts with keys `id` (uuid) and `text` (fake HTML markup).
    """
    faker = Faker()
    if faker_seed is not None:
        Faker.seed(faker_seed)
    if random_seed is not None:
        random.seed(random_seed)

    def _generate_fake_html_format():
        tags = ["p", "h1", "h2", "div", "span", "a", "ul", "ol", "li", "strong", "em"]
        html_content = ""
        for _ in range(random.randint(3, 10)):
            tag = random.choice(tags)
            if tag in ["ul", "ol"]:
                items = ""
                for _ in range(random.randint(2, 4)):
                    items += f"<li>{faker.sentence()}</li>"
                # BUG FIX: the closing tag previously interpolated the whole
                # `tags` list (`</{tags}>`), emitting `</['p', 'h1', ...]>`
                # instead of the matching closing tag.
                html_content += f"<{tag}>{items}</{tag}>"
            elif tag == "a":
                html_content += f'<a href="{faker.url()}">{faker.word()}</a>'
            else:
                html_content += f"<{tag}>{faker.text()}</{tag}>"
        return html_content

    def _generate_fake_html(n=100):
        while n > 0:
            n -= 1
            fake_html = _generate_fake_html_format()
            yield {"id": faker.uuid4(), "text": fake_html}

    rdd = spark.sparkContext.parallelize(_generate_fake_html(n=n))
    rdd = rdd.repartition(repartition)
    return rdd
spark, n=100, repartition=20, create_type="word", faker_seed=None, random_seed=None, verbose=True, *args, **kwargs, ): """ generate fake data that is mixed with korean and english. This creates data based on random ratio for each row. Args: spark (SparkSession): spark session n (int): the number of data to generate repartition (int): the number of partitions create_type (str): handles type of creating random data. faker_seed (int, optional): Random seed of faker library. Defaults to None. random_seed (int, optional): Random seed of random library. Defaults to None. verbose (bool): whether to print the information of the dataset """ assert create_type in [ "char", "word", ], "this is following filter_type of function `cleaning___korean___filter_by_ratio`" faker = Faker(["en_US", "ko_KR"]) if faker_seed is not None: Faker.seed(faker_seed) faker_en = faker["en_US"] faker_ko = faker["ko_KR"] if random_seed is not None: random.seed(random_seed) from etl.cleaning.korean import JAUM, KOR_BEGIN, KOR_END, MOUM jamo = JAUM + MOUM def _generate_fake_korean_english_mixed( korean_ratio, total_count, create_type, space_ratio=0.3 ): if create_type == "word": words = [] for _ in range(total_count): if random.random() < korean_ratio: words.append(faker_ko.name()) else: words.append(faker_en.last_name()) return " ".join(words) else: # create_type == "char" chars = "" korean_length = int(total_count * korean_ratio) for _ in range(korean_length): korean_type = random.choice(["jamo", "eumjeol"]) cur_kor = ( chr(random.randint(KOR_BEGIN, KOR_END)) if korean_type == "eumjeol" else random.choice(jamo) ) chars += cur_kor if random.random() < space_ratio: chars += " " english_length = total_count - korean_length english_text = faker_en.sentence(nb_words=english_length) chars += english_text[:english_length] return chars def _generate_fake_korean(n=100, create_type="word"): while n > 0: n -= 1 korean_ratio = random.random() total_count = random.randint(0, 300) fake_korean = 
_generate_fake_korean_english_mixed( korean_ratio, total_count, create_type ) yield { "id": faker.uuid4(), "text": fake_korean, "korean_ratio": korean_ratio, } rdd = spark.sparkContext.parallelize(_generate_fake_korean(n=n, create_type=create_type)) rdd = rdd.repartition(repartition) return rdd def test_cleaning___korean___filter_by_ratio(): from etl.cleaning.korean import cleaning___korean___filter_by_ratio # noqa filter_type = "word" korean_ratio = 0.6 etl_pipeline = ETLPipeline() ETL_config = OmegaConf.create( { "etl": [ { "name": "helper___test___generate_korean", "args": { "faker_seed": faker_seed, "random_seed": random_seed, "n": 1000, "create_type": filter_type, }, }, { "name": "cleaning___korean___filter_by_ratio", "args": { "subset": "text", "korean_ratio": korean_ratio, "filter_type": filter_type, }, }, ] } ) _, result = etl_pipeline.run(ETL_config) assert any(row["korean_ratio"] < korean_ratio for row in result.collect()) def test_cleaning___korean___filter_by_ratio_chars(): from etl.cleaning.korean import cleaning___korean___filter_by_ratio # noqa filter_type = "char" korean_ratio = 0.6 etl_pipeline = ETLPipeline() ETL_config = OmegaConf.create( { "etl": [ { "name": "helper___test___generate_korean", "args": { "faker_seed": faker_seed, "random_seed": random_seed, "n": 1000, "create_type": filter_type, }, }, { "name": "cleaning___korean___filter_by_ratio", "args": { "subset": "text", "korean_ratio": korean_ratio, "filter_type": filter_type, }, }, ] } ) _, result = etl_pipeline.run(ETL_config) print(result.collect()) assert any(row["korean_ratio"] < korean_ratio for row in result.collect()) @register_etl def helper___test___generate_korean_emoticon(spark, *args, **kwargs): data = spark.createDataFrame( [(1, "안녕하세요ㅋㅋㅋㅋㅋ"), (2, "ㅎㅎㅎㅎㅎ잘 지내세요?"), (3, "그래요ㅜㅜㅜㅜ"), (4, "ㅋㅋ쿵ㅜㅜㅋ쿠ㅜㅜ")], ["id", "text"], ) return data def test_cleaning___korean___reduce_emoticon(): from etl.cleaning.korean import cleaning___korean___reduce_emoticon # noqa etl_pipeline = ETLPipeline() 
ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_korean_emoticon"}, {"name": "cleaning___korean___reduce_emoticon"}, ] } ) _, result = etl_pipeline.run(ETL_config) expected_result = [ (1, "안녕하세요ㅋㅋ"), (2, "ㅎㅎ잘 지내세요?"), (3, "그래요ㅜㅜ"), (4, "ㅋㅋ쿵ㅜㅜㅋㅋㅜㅜ"), ] for expected, result_row in zip(expected_result, result.collect()): assert ( expected[1] == result_row["text"] ), f'Expected {expected[1]}, but got {result_row["text"]}' ================================================ FILE: dataverse/tests/test_cleaning_length.py ================================================ import pytest from faker import Faker from omegaconf import OmegaConf from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl faker_seed = 42 @register_etl def helper___test___generate_data_for_test_length(spark, n=10, faker_seed=None, *args, **kwargs): faker = Faker() if faker_seed is not None: Faker.seed(faker_seed) data = [] for _ in range(n): fake_data = faker.paragraph() data.append((fake_data, len(fake_data), len(fake_data.split()))) data.append(("", len(""), len("".split()))) df = spark.createDataFrame(data, ["text", "char_length", "word_length"]) return df def test_cleaning___length___char_len_filter(): from etl.cleaning.length import cleaning___length___char_len_filter # noqa: F401 etl_pipeline = ETLPipeline() original = OmegaConf.create( {"etl": [{"name": "helper___test___generate_data_for_test_length"}]} ) _, result = etl_pipeline.run(original) max_value, min_value = ( result.select("char_length").rdd.max()[0], result.select("char_length").rdd.min()[0], ) print("*----------------------------*") print(f"max length of test data is {max_value}. 
min length of test data is {min_value}") min_max_both = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_data_for_test_length"}, { "name": "cleaning___length___char_len_filter", "args": {"min_len": 3, "max_len": 40}, }, ] } ) _, result = etl_pipeline.run(min_max_both) assert all((row["char_length"] >= 3) and (row["char_length"] <= 40) for row in result.collect()) assert result.filter(lambda row: row["text"] == "").count() == 0 min_only = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_data_for_test_length"}, {"name": "cleaning___length___char_len_filter", "args": {"min_len": 3}}, ] } ) _, result = etl_pipeline.run(min_only) assert all(row["char_length"] >= 3 for row in result.collect()) assert result.filter(lambda row: row["text"] == "").count() == 0 max_only = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_data_for_test_length"}, {"name": "cleaning___length___char_len_filter", "args": {"max_len": 3}}, ] } ) _, result = etl_pipeline.run(max_only) assert all(row["char_length"] <= 3 for row in result.collect()) assert result.filter(lambda row: row["text"] == "").count() > 0 nothing_given = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_data_for_test_length"}, {"name": "cleaning___length___char_len_filter"}, ] } ) with pytest.raises(AssertionError): _, result = etl_pipeline.run(nothing_given) def test_cleaning___length___word_len_filter(): from etl.cleaning.length import cleaning___length___word_len_filter # noqa: F401 etl_pipeline = ETLPipeline() original = OmegaConf.create( {"etl": [{"name": "helper___test___generate_data_for_test_length"}]} ) _, result = etl_pipeline.run(original) max_value, min_value = ( result.select("word_length").rdd.max()[0], result.select("word_length").rdd.min()[0], ) print("*----------------------------*") print(f"max length of test data is {max_value}. 
min length of test data is {min_value}") min_max_both = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_data_for_test_length"}, { "name": "cleaning___length___word_len_filter", "args": {"min_len": 3, "max_len": 40}, }, ] } ) _, result = etl_pipeline.run(min_max_both) assert all((row["word_length"] >= 3) and (row["word_length"] <= 40) for row in result.collect()) assert result.filter(lambda row: row["text"] == "").count() == 0 min_only = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_data_for_test_length"}, {"name": "cleaning___length___word_len_filter", "args": {"min_len": 3}}, ] } ) _, result = etl_pipeline.run(min_only) assert all(row["word_length"] >= 3 for row in result.collect()) assert result.filter(lambda row: row["text"] == "").count() == 0 max_only = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_data_for_test_length"}, {"name": "cleaning___length___word_len_filter", "args": {"max_len": 3}}, ] } ) _, result = etl_pipeline.run(max_only) assert all(row["word_length"] <= 3 for row in result.collect()) assert result.filter(lambda row: row["text"] == "").count() > 0 nothing_given = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_data_for_test_length"}, {"name": "cleaning___length___word_len_filter"}, ] } ) with pytest.raises(AssertionError): _, result = etl_pipeline.run(nothing_given) ================================================ FILE: dataverse/tests/test_cleaning_number.py ================================================ from omegaconf import OmegaConf from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl @register_etl def helper___test___generate_number(spark, *args, **kwargs): data = [ ("1234 apples and 5678 oranges ",), ("9876.54321 dollars",), ("This is random 1-3462-01.xx 87",), ("**6*342* history 0.6242 00002",), ("#eff000, af2f33, random color codes 1110 (013-0802-1143)",), ("88888888888888888888-888",), ] df = spark.createDataFrame(data, ["text"]) 
return df def test_cleaning___number___normalize(): from etl.cleaning.number import cleaning___number___normalize # noqa: F401 etl_pipeline = ETLPipeline() ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_number"}, {"name": "cleaning___number___normalize", "args": {"assign_number": 8}}, ] } ) spark, result = etl_pipeline.run(ETL_config) result_df = result.toDF() expected_data = [ ("8888 apples and 8888 oranges ",), ("8888.88888 dollars",), ("This is random 8-8888-88.xx 88",), ("**8*888* history 8.8888 88888",), ("#eff888, af8f88, random color codes 8888 (888-8888-8888)",), ("88888888888888888888-888",), ] expected_df = spark.createDataFrame(expected_data, ["text"]) assert expected_df.collect() == result_df.collect() ================================================ FILE: dataverse/tests/test_cleaning_table.py ================================================ from omegaconf import OmegaConf from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl @register_etl def helper___test___generate_table(spark, *args, **kwargs): data = [(1, 2, "duck"), (3, 4, "duck"), (5, 6, "ducky")] df = spark.createDataFrame(data, ["column1", "column2", "species"]) return df def test_cleaning___table___merge_col_vertical(): from etl.cleaning.table import cleaning___table___merge_col_vertical # noqa: F401 etl_pipeline = ETLPipeline() ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_table"}, { "name": "cleaning___table___merge_col_vertical", "args": { "col1": "column1", "col2": "column2", "merge_col_name": "number", }, }, ] } ) spark, result = etl_pipeline.run(ETL_config) expected_data = [ ("duck", 1), ("duck", 3), ("ducky", 5), ("duck", 2), ("duck", 4), ("ducky", 6), ] expected_df = spark.createDataFrame(expected_data, ["species", "number"]) assert result.collect() == expected_df.collect() ================================================ FILE: dataverse/tests/test_cleaning_unicode.py 
================================================ from omegaconf import OmegaConf from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl @register_etl def helper___test___generate_unicode_data(spark, *args, **kwargs): data = [ (",。、„”“«»1」「《》´∶:?!();–—.~’…━〈〉【】%►",), ("Hello, world!!0 1 2 ñ",), ("This is fun。ction for —dataverse━",), ("You can use 《dataverse》” for your ETL cycle?",), ("Test sentence.",), ("~~~~",), ] df = spark.createDataFrame(data, ["text"]) return df def helper___test___generate_expected_unicode_data(spark, type="remove"): assert type in ["remove", "replace", "normalize"] if type == "remove": expected_data = [ ("",), ("Hello world0 1 2 ñ",), ("This is function for dataverse",), ("You can use dataverse for your ETL cycle",), ("Test sentence.",), ("",), ] elif type == "replace": expected_data = [ (''',.,""""""""""'::?!();- - . ~'...-<>[]%-''',), ("Hello, world!!0 1 2 ñ",), ("This is fun.ction for - dataverse-",), ('You can use "dataverse"" for your ETL cycle?',), ("Test sentence.",), ("~~~~",), ] else: # type == "normalize" expected_data = [ (",。、„”“«»1」「《》´∶:?!();–—.~’…━〈〉【】%►",), ("Hello, world!!0 1 2 ñ",), ("This is fun。ction for —dataverse━",), ("You can use 《dataverse》” for your ETL cycle?",), ("Test sentence.",), ("~~~~",), ] return spark.createDataFrame(expected_data, ["text"]) def test_cleaning___unicode___remove_punct(): from etl.cleaning.unicode import cleaning___unicode___remove_punct # noqa: F401 etl_pipeline = ETLPipeline() ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_unicode_data"}, {"name": "cleaning___unicode___remove_punct"}, ] } ) spark, result = etl_pipeline.run(ETL_config) expected = helper___test___generate_expected_unicode_data(spark, type="remove") assert all( result_row["text"] == expected_row["text"] for (result_row, expected_row) in zip(result.collect(), expected.collect()) ) def test_cleaning___unicode___replace_punct(): from etl.cleaning.unicode import 
@register_etl
def helper___test___generate_exact_line(spark, *args, **kwargs):
    """Build a 2-row DataFrame for the exact-line deduplication test.

    Each row carries `text` (newline-joined lines containing case-variant
    duplicates) and `line_ids` — presumably the indices of the lines expected
    to survive dedup (matches the test's expected output; verify against the
    ETL implementation).

    Note: fixes the `**kwagrs` typo in the original signature.
    """
    data = [
        Row(text="DataversE\ndATAVERSE\nQuack\nQUaCk\nquack", line_ids=[0, 2]),
        Row(text="hello\nHELLO\nWorld\nWoRLD", line_ids=[0, 2]),
    ]
    df = spark.createDataFrame(data)
    return df
spark.createDataFrame(result) expected = [{"text": "DataversE\nQuack"}, {"text": "hello\nWorld"}] expected = spark.createDataFrame(expected) assert set(result.collect()) == set(expected.collect()) ================================================ FILE: dataverse/tests/test_deduplication_exact.py ================================================ from omegaconf import OmegaConf from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl @register_etl def helper___test___generate_duplicated_data(spark, *args, **kwargs): data = [(1, "dataverse"), (2, "apple"), (3, "dataverse"), (4, "carrot")] columns = ["id", "text"] df = spark.createDataFrame(data, columns) return df def test_deduplication___exact_column(): from etl.deduplication.exact import deduplication___exact___column # noqa: F401 etl_pipeline = ETLPipeline() columns = ["id", "text"] # subset : text ETL_config_text = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_duplicated_data"}, { "name": "deduplication___exact___column", "args": {"subset": ["text"]}, }, ] } ) spark, result_text = etl_pipeline.run(ETL_config_text) expected_text = [(1, "dataverse"), (2, "apple"), (4, "carrot")] expected_text = spark.createDataFrame(expected_text, schema=columns) assert sorted(result_text.collect()) == sorted(expected_text.collect()) # subset : text and id ETL_config_text_id = OmegaConf.create( { "etl": [ {"name": "helper___test___generate_duplicated_data"}, { "name": "deduplication___exact___column", "args": {"subset": ["text", "id"]}, }, ] } ) spark, result_text_id = etl_pipeline.run(ETL_config_text_id) expected_text_id = [(1, "dataverse"), (2, "apple"), (3, "dataverse"), (4, "carrot")] expected_text_id = spark.createDataFrame(expected_text_id, schema=columns) assert set(result_text_id.collect()) == set(expected_text_id.collect()) ================================================ FILE: dataverse/tests/test_deduplication_minhash.py ================================================ from 
omegaconf import OmegaConf from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl ================================================ FILE: dataverse/tests/test_deduplication_polyglot.py ================================================ from omegaconf import OmegaConf from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl @register_etl def helper___test___create_data_for_polyglot_minhash(spark, *args, **kwargs): data = [ {"text": "hello wolrd! Welcome to dataverse."}, {"text": "hello wolrd! Welcome to dataverrrrse."}, {"text": "a totally different sentence"}, ] df = spark.createDataFrame(data) return df def test_deduplication___polyglot___minhash(): from etl.deduplication.polyglot import ( # noqa: F401 deduplication___polyglot___minhash, ) etl_pipeline = ETLPipeline() ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___create_data_for_polyglot_minhash"}, { "name": "deduplication___polyglot___minhash", "args": { "expand_size": 64, "n_gram": 2, "seed": 1, "char_level": False, "sim_threshold": 0.2, }, }, ] } ) _, result = etl_pipeline.run(ETL_config) assert result.count() == 2 texts = set(map(lambda x: x["text"], result.collect())) assert ("hello wolrd! Welcome to dataverse." in texts) or ( "hello wolrd! Welcome to dataverrrrse." in texts ) assert "a totally different sentence" in texts ================================================ FILE: dataverse/tests/test_pii_card.py ================================================ import re from omegaconf import OmegaConf from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl sample_card = "1234-1234-1234-1234" @register_etl def helper___test___create_data_for_pii_card(spark, *args, **kwargs): return spark.createDataFrame([{"text": f"Your card No. 
is {sample_card}"}]) def test_pii___card___replace(): from etl.pii.card import pii___card___replace_card_number # noqa: F401 etl_pipeline = ETLPipeline() # Case 1: replace with random number ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___create_data_for_pii_card"}, { "name": "pii___card___replace_card_number", "args": {"random_pii": True}, }, ] } ) _, result = etl_pipeline.run(ETL_config) assert re.match(r"(\d{4}-\d{4}-\d{4}-\d{4})", result.collect()[0]["text"]) != sample_card # Case 2: replace with replace token ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___create_data_for_pii_card"}, { "name": "pii___card___replace_card_number", "args": {"replace_pii": True, "replace_token": "[CARD_NUMBER]"}, }, ] } ) _, result = etl_pipeline.run(ETL_config) expected = "Your card No. is [CARD_NUMBER]" assert result.collect()[0]["text"] == expected # Case 3: replace with replace token and add start, end token ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___create_data_for_pii_card"}, { "name": "pii___card___replace_card_number", "args": { "replace_pii": True, "replace_token": "[CARD_NUMBER]", "start_token": "[CARD_START]", "end_token": "[CARD_END]", }, }, ] } ) _, result = etl_pipeline.run(ETL_config) expected = "Your card No. is [CARD_START][CARD_NUMBER][CARD_END]" assert result.collect()[0]["text"] == expected # Case 4: can't detect with different pattern ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___create_data_for_pii_card"}, { "name": "pii___card___replace_card_number", "args": { "pattern": r"(\d{5}-\d{5}-\d{5}-\d{5})", "replace_pii": True, "replace_token": "[CARD_NUMBER]", "start_token": "[CARD_START]", "end_token": "[CARD_END]", }, }, ] } ) _, result = etl_pipeline.run(ETL_config) expected = f"Your card No. 
is {sample_card}" assert result.collect()[0]["text"] == expected ================================================ FILE: dataverse/tests/test_pii_nin.py ================================================ import re from omegaconf import OmegaConf from dataverse.etl import ETLPipeline from dataverse.etl.registry import register_etl sample_nin = "240101-0111111" @register_etl def helper___test___create_data_for_korean_rnn(spark, *args, **kwargs): return spark.createDataFrame([{"text": f"nin is {sample_nin}"}]) def test_pii___nin___replace_korean_rnns(): from etl.pii.nin import pii___nin___replace_korean_rrn # noqa: F401 etl_pipeline = ETLPipeline() # Case 1: Random PII ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___create_data_for_korean_rnn"}, { "name": "pii___nin___replace_korean_rrn", "args": {"random_pii": True, "replace_pii": False}, }, ] } ) _, result = etl_pipeline.run(ETL_config) assert re.search(r"\d{6}-\d{7}", result.collect()[0]["text"]) != sample_nin # Case 2: Replace PII with Token ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___create_data_for_korean_rnn"}, { "name": "pii___nin___replace_korean_rrn", "args": {"replace_pii": True, "replace_token": "[REDACTED]"}, }, ] } ) _, result = etl_pipeline.run(ETL_config) assert result.collect()[0]["text"] == "nin is [REDACTED]" # Case 3: Add start and end tokens ETL_config = OmegaConf.create( { "etl": [ {"name": "helper___test___create_data_for_korean_rnn"}, { "name": "pii___nin___replace_korean_rrn", "args": { "replace_pii": True, "replace_token": "[REDACTED]", "start_token": "[START]", "end_token": "[END]", }, }, ] } ) _, result = etl_pipeline.run(ETL_config) assert result.collect()[0]["text"] == "nin is [START][REDACTED][END]" ================================================ FILE: dataverse/utils/README.md ================================================ # Utils > Common utilities ## API ## Format ## Setting ================================================ FILE: 
dataverse/utils/__init__.py ================================================ ================================================ FILE: dataverse/utils/analyze/README.md ================================================ # Analyze > gaining insight of whatever you want to know ## Naming Convention - `<target>_<function_name>` - e.g. `python_is_script_executable` - e.g. `jupyter_find_orginal_file` ================================================ FILE: dataverse/utils/analyze/__init__.py ================================================ from .python import python_is_script_executable from .pip import pip_get_package_path ================================================ FILE: dataverse/utils/analyze/pip.py ================================================ import pkg_resources def pip_get_package_path(package_name): try: package = pkg_resources.get_distribution(package_name) return package.location except pkg_resources.DistributionNotFound: print(f"Package '{package_name}' is not installed.") return None ================================================ FILE: dataverse/utils/analyze/python.py ================================================ import ast def python_is_script_executable(file_path, verbose=False): """ check if a python script is executable in other words, check if the python script does not contains any declaration nodes (imports, functions, classes, etc.) 
def python_is_script_executable(file_path, verbose=False):
    """
    Check if a python script is executable.

    In other words, check if the python script does not contain anything
    other than declaration nodes:
    - module docstring
    - imports
    - functions (sync and async)
    - classes
    - variables (plain and annotated assignments)

    Args:
        file_path (str): path to the python script to check
        verbose (bool): print the first executable node found, if any

    Returns:
        bool: True if the python script is executable, False otherwise
    """
    with open(file_path, 'r') as file:
        source_code = file.read()

    # Parse source code into an AST
    module = ast.parse(source_code)

    # FIX: the original check missed several declaration-only nodes:
    # - ast.AnnAssign  (`x: int = 1`) was treated as executable
    # - ast.AsyncFunctionDef (`async def`) was treated as executable
    # - a module docstring (a bare string Expr) was treated as executable
    declaration_nodes = (
        ast.Import,
        ast.ImportFrom,
        ast.FunctionDef,
        ast.AsyncFunctionDef,
        ast.ClassDef,
        ast.Assign,
        ast.AnnAssign,
    )
    for node in module.body:
        if isinstance(node, declaration_nodes):
            continue
        # bare string expressions (module docstrings) are declarations too
        if (
            isinstance(node, ast.Expr)
            and isinstance(node.value, ast.Constant)
            and isinstance(node.value.value, str)
        ):
            continue
        if verbose:
            print("found executable code: {}".format(node))
        return True

    if verbose:
        print("found no executable code")
    return False
`aws_s3_download_file` ================================================ FILE: dataverse/utils/api/__init__.py ================================================ # AWS from .aws import AWSClient from .aws import EMRManager from .aws import aws_check_credentials from .aws import aws_get_state from .aws import aws_set_state # EC2 from .aws import aws_ec2_instance_at_az from .aws import aws_ec2_instance_info from .aws import aws_ec2_all_instance_info from .aws import aws_ec2_get_price # SSM from .aws import aws_ssm_run_commands # VPC from .aws import aws_vpc_create from .aws import aws_vpc_delete from .aws import aws_subnet_create from .aws import aws_subnet_delete from .aws import aws_subnet_az from .aws import aws_emr_security_group_create from .aws import aws_security_group_delete from .aws import aws_security_group_remove_dependency from .aws import aws_gateway_create from .aws import aws_gateway_delete from .aws import aws_route_table_create from .aws import aws_route_table_delete from .aws import aws_route_table_asscociate_subnet from .aws import aws_elastic_ip_allocate from .aws import aws_elastic_ip_release from .aws import aws_nat_gateway_create from .aws import aws_nat_gateway_delete from .aws import aws_iam_role_create from .aws import aws_iam_role_delete from .aws import aws_iam_instance_profile_create from .aws import aws_iam_instance_profile_delete # S3 from .aws import aws_s3_path_parse from .aws import aws_s3_create_bucket from .aws import aws_s3_delete_bucket from .aws import aws_s3_read from .aws import aws_s3_download from .aws import aws_s3_upload from .aws import aws_s3_write from .aws import aws_s3_delete from .aws import aws_s3_list_buckets from .aws import aws_s3_ls from .aws import aws_s3_get_object_type ================================================ FILE: dataverse/utils/api/aws.py ================================================ """ Usage: ```python from dataverse.utils.api import aws_s3_list_buckets from dataverse.utils.api import 
# TODO: get the information from AWS when it's supported someday
# reference - https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-emr-supported-instance-types.html
# FIX: "m4.16large" corrected to the real instance type "m4.16xlarge",
# and the duplicated "c5.9xlarge" entry removed.
EMR_SUPPORTED_EC2_INSTANCES = [
    "m1.small", "m1.medium", "m1.large", "m1.xlarge",
    "m3.xlarge", "m3.2xlarge",
    "c1.medium", "c1.xlarge",
    "c3.xlarge", "c3.2xlarge", "c3.4xlarge", "c3.8xlarge",
    "cc1.4xlarge", "cc2.8xlarge",
    "c4.large", "c4.xlarge", "c4.2xlarge", "c4.4xlarge", "c4.8xlarge",
    "c5.xlarge", "c5.2xlarge", "c5.4xlarge", "c5.9xlarge", "c5.18xlarge",
    "c5d.xlarge", "c5d.2xlarge", "c5d.4xlarge", "c5d.9xlarge", "c5d.18xlarge",
    "m2.xlarge", "m2.2xlarge", "m2.4xlarge",
    "r3.xlarge", "r3.2xlarge", "r3.4xlarge", "r3.8xlarge",
    "cr1.8xlarge",
    "m4.large", "m4.xlarge", "m4.2xlarge", "m4.4xlarge", "m4.10xlarge", "m4.16xlarge",
    "m5.xlarge", "m5.2xlarge", "m5.4xlarge", "m5.12xlarge", "m5.24xlarge",
    "m5d.xlarge", "m5d.2xlarge", "m5d.4xlarge", "m5d.12xlarge", "m5d.24xlarge",
    "r4.large", "r4.xlarge", "r4.2xlarge", "r4.4xlarge", "r4.8xlarge", "r4.16xlarge",
    "h1.4xlarge",
    "hs1.2xlarge", "hs1.4xlarge", "hs1.8xlarge",
    "i2.xlarge", "i2.2xlarge", "i2.4xlarge", "i2.8xlarge",
    "d2.xlarge", "d2.2xlarge", "d2.4xlarge", "d2.8xlarge",
    "g2.2xlarge",
    "cg1.4xlarge",
]


def aws_check_credentials(verbose=True):
    """
    simple check if aws credentials are valid

    Args:
        verbose (bool): print the failure reason when credentials are invalid

    Returns:
        bool: True if valid, False if not valid
    """
    sts = boto3.client('sts')
    try:
        sts.get_caller_identity()
        return True
    except Exception as e:
        # best-effort probe: any failure (no credentials, expired, network)
        # is reported as "not valid" rather than raised
        if verbose:
            print(e)
        return False


class AWSClient:
    """
    AWS Client Information (singleton)

    Lazily creates one shared set of boto3 clients per process and caches
    the caller's identity. Requires a configured AWS region.
    """
    # Singleton
    _initialized = False

    def __new__(cls):
        if not hasattr(cls, 'instance'):
            cls.instance = super(AWSClient, cls).__new__(cls)
        return cls.instance

    def __init__(self):
        # __init__ runs on every AWSClient() call; only set up clients once
        if self._initialized:
            return

        self.region = boto3.session.Session().region_name
        if self.region is None:
            raise Exception("AWS Region is not set. Set the AWS Region with `aws configure`")

        self.sts = boto3.client('sts')
        self.iam = boto3.client('iam')
        self.s3 = boto3.client('s3')
        self.ec2 = boto3.client('ec2')
        self.emr = boto3.client('emr')
        self.ssm = boto3.client('ssm')

        self.user_id = self.sts.get_caller_identity()['UserId']
        self.account_id = self.sts.get_caller_identity()['Account']

        self._initialized = True

    def __str__(self) -> str:
        # FIX: the original called __repr__ without returning it, so
        # str(client) returned None and raised TypeError
        return self.__repr__()

    def __repr__(self) -> str:
        return f"AWSClient(region={self.region}, user_id={self.user_id})"


# --------------------------------------------------------------------------------
# AWS State
#
# [ What is State? ]
# >>> state management of operating aws services for dataverse
# State is kept as a python dictionary and saved as a json file in aws s3,
# synced with running AWS services, one state file per user.
#
# [ stored information ]
# - cache, meta, config, codes, etc.
""" def aws_get_state(): # to avoid circular import from dataverse.utils.setting import SystemSetting aws_bucket = SystemSetting()['AWS_BUCKET'] state_path = f'{AWSClient().user_id}/state.json' # get state from aws s3 try: content = aws_s3_read(aws_bucket, state_path) state = json.loads(content) # FIXME: exception should distinguish between key not found and other errors except: state = {} aws_s3_write(aws_bucket, state_path, json.dumps(state)) return state def aws_set_state(state): # to avoid circular import from dataverse.utils.setting import SystemSetting aws_bucket = SystemSetting()['AWS_BUCKET'] state_path = f'{AWSClient().user_id}/state.json' aws_s3_write(aws_bucket, state_path, json.dumps(state)) # -------------------------------------------------------------------------------- # AWS EC2 Resource def aws_ec2_instance_at_az(az): """ get all instance info at the given AZ """ response = AWSClient().ec2.describe_instance_type_offerings( LocationType='availability-zone', Filters=[ { 'Name': 'location', 'Values': [ az, ] }, ] ) instances = [inst['InstanceType'] for inst in response['InstanceTypeOfferings']] return instances def aws_ec2_instance_info(instance): """ get instance info from aws """ response = AWSClient().ec2.describe_instance_types( InstanceTypes=[instance], ) return response def aws_ec2_all_instance_info(): """ get all instance types information """ instance_info = {} token = '' while True: if token == '': response = AWSClient().ec2.describe_instance_types() else: response = AWSClient().ec2.describe_instance_types(NextToken=token) for instance_type in response['InstanceTypes']: instance_info[instance_type['InstanceType']] = { 'vcpu': instance_type['VCpuInfo']['DefaultVCpus'], 'memory': instance_type['MemoryInfo']['SizeInMiB'] } if 'NextToken' in response: token = response['NextToken'] else: break return instance_info def aws_ec2_get_price(instance_type): response = AWSClient().ec2.describe_spot_price_history( InstanceTypes=[instance_type], 
ProductDescriptions=['Linux/UNIX (Amazon VPC)'], StartTime=datetime.datetime.now(), MaxResults=1, ) return response['SpotPriceHistory'][0]['SpotPrice'] # -------------------------------------------------------------------------------- # AWS SSM (Systems Manager) def aws_ssm_run_commands(instance_ids, commands, verbose=True, return_output=False): """ Run commands on a list of EC2 instances using AWS SSM. """ if return_output: results = {} for command in commands: if verbose: print(f"Sending following command to all instances...") print("==========================================") print(command) print("==========================================") command_id = AWSClient().ssm.send_command( InstanceIds=instance_ids, DocumentName="AWS-RunShellScript", Parameters={"commands": [command]}, TimeoutSeconds=3600, )["Command"]["CommandId"] while True: # verify the previous step succeeded before running the next step. cmd_result = AWSClient().ssm.list_commands(CommandId=command_id)["Commands"][0] if cmd_result["StatusDetails"] == "Success": if verbose or return_output: command_invocation = AWSClient().ssm.get_command_invocation( CommandId=command_id, InstanceId=instance_ids[0], # assume all instances are the same ) if verbose: print("=========== Standard output ============") print(command_invocation["StandardOutputContent"]) print("==========================================") print(f"Command succeeded.") if return_output: results[command] = command_invocation["StandardOutputContent"] break elif cmd_result["StatusDetails"] in ["Pending", "InProgress"]: if verbose: print(f"Command status is {cmd_result['StatusDetails']}, waiting...") time.sleep(10) else: if verbose: print(f"Command status is {cmd_result['StatusDetails']}, quitting.") # get more detailed information about the command failure command_invocation = AWSClient().ssm.get_command_invocation( CommandId=command_id, InstanceId=instance_ids[0], # assume all instances are the same ) print("============= Error output 
==============") print(command_invocation["StandardErrorContent"]) print("=========== Standard output ============") print(command_invocation["StandardOutputContent"]) print("==========================================") raise RuntimeError( f"Command failed to run. [ {cmd_result['StatusDetails']} ]" ) if return_output: return results # -------------------------------------------------------------------------------- # AWS EMR class EMRManager: """ one EMR manager per one EMR cluster """ def launch(self, config): """ auto setup environments and launch emr cluster Args: config (OmegaConf): config for the etl """ # clean unused resources self._clean() if config.emr.id is not None: config.emr.auto_generated = False return config.emr.id # TODO: modify interface for custom policy # create role & instance profile self._role_setup(config) self._instance_profile_setup(config) # create vpc self._vpc_setup(config) # create emr cluster # XXX: wait until instance profile is ready # otherwise, emr cluster creation will fail # FIXME: convert to smart solution (e.g. 
waiter) # currently AWS doesn't support waiter available option for instance profile # NOTE: I've tried to make waiter using `describe_instance_profile` but it didn't work time.sleep(7) # set default instance type self._set_default_instance(config) emr_id = self._emr_cluster_create(config) config.emr.id = emr_id config.emr.auto_generated = True return emr_id def _role_setup(self, config): """ TODO: modify interface for custom policy """ # [ EC2 ] -------------------------------------------------- ec2_trust_policy = { "Version": "2008-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": { "Service": "ec2.amazonaws.com" }, "Action": "sts:AssumeRole" } ] } ec2_role = 'Dataverse_EMR_EC2_DefaultRole' ec2_policy = 'AmazonElasticMapReduceforEC2Role' ssm_policy = 'AmazonSSMManagedInstanceCore' # add timestamp to temporary role name timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") ec2_role = f"{ec2_role}_{timestamp}" ec2_policy_arns = [ f"arn:aws:iam::aws:policy/service-role/{ec2_policy}", f"arn:aws:iam::aws:policy/{ssm_policy}" ] aws_iam_role_create( role_name=ec2_role, trust_policy=ec2_trust_policy, policy_arns=ec2_policy_arns, description='Role for Dataverse EMR EC2', ) config.emr.role.ec2.name = ec2_role config.emr.role.ec2.policy_arns = ec2_policy_arns # [ EMR ] -------------------------------------------------- emr_trust_policy = { "Version": "2008-10-17", "Statement": [ { "Sid": "", "Effect": "Allow", "Principal": { "Service": "elasticmapreduce.amazonaws.com" }, "Action": "sts:AssumeRole", "Condition": { "StringEquals": { "aws:SourceAccount": AWSClient().account_id }, "ArnLike": { "aws:SourceArn": f"arn:aws:elasticmapreduce:{AWSClient().region}:{AWSClient().account_id}:*" } } } ] } emr_role = 'Dataverse_EMR_DefaultRole' emr_policy = 'AmazonElasticMapReduceRole' # add timestamp to temporary role name timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") emr_role = f"{emr_role}_{timestamp}" emr_policy_arns = 
[f"arn:aws:iam::aws:policy/service-role/{emr_policy}"] aws_iam_role_create( role_name=emr_role, trust_policy=emr_trust_policy, policy_arns=emr_policy_arns, description='Role for Dataverse EMR', ) config.emr.role.emr.name = emr_role config.emr.role.emr.policy_arns = emr_policy_arns def _instance_profile_setup(self, config): """ TODO: modify interface for custom policy """ ec2_role = config.emr.role.ec2.name instance_profile_name = 'Dataverse_EMR_EC2_DefaultRole_InstanceProfile' # add timestamp to temporary role name timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") instance_profile_name = f"{instance_profile_name}_{timestamp}" aws_iam_instance_profile_create( instance_profile_name=instance_profile_name, role_name=ec2_role, ) config.emr.instance_profile.name = instance_profile_name config.emr.instance_profile.ec2_role = ec2_role def _vpc_setup(self, config): """ config will be automatically updated """ # VPC vpc_id = aws_vpc_create() config.emr.vpc.id = vpc_id # if private subnet is required subnet_args = { 'vpc_id': vpc_id, 'tag_name': 'Dataverse-Temporary-Subnet-Public', } if not config.emr.subnet.public: vpcs = AWSClient().ec2.describe_vpcs(VpcIds=[vpc_id]) cidr_block = vpcs['Vpcs'][0]['CidrBlock'] ip_net = ipaddress.ip_network(cidr_block) # split the network into two subnets public_subnet, private_subnet = list(ip_net.subnets()) subnet_args['cird_block'] = str(public_subnet) # Subnet subnet_id = aws_subnet_create(**subnet_args) config.emr.subnet.id = subnet_id config.emr.subnet.public_id = subnet_id # Internet Gateway gateway_id = aws_gateway_create(vpc_id) config.emr.gateway.id = gateway_id # Route Table route_table_id = aws_route_table_create( vpc_id=vpc_id, gateway_id=gateway_id, destination_cidr_block='0.0.0.0/0', tag_name='Dataverse-Route-Table-Public', ) aws_route_table_asscociate_subnet(subnet_id, route_table_id) config.emr.route_table.id = route_table_id if not config.emr.subnet.public: # add NAT Gateway to public subnet elastic_ip_id = 
aws_elastic_ip_allocate(vpc_id=vpc_id) config.emr.elastic_ip.id = elastic_ip_id nat_gateway_id = aws_nat_gateway_create( vpc_id=vpc_id, subnet_id=subnet_id, elastic_ip_id=elastic_ip_id, ) config.emr.nat_gateway.id = nat_gateway_id # create private subnet private_subnet_id = aws_subnet_create( vpc_id=vpc_id, cird_block=str(private_subnet), tag_name='Dataverse-Temporary-Subnet-Private', ) config.emr.subnet.id = private_subnet_id config.emr.subnet.private_id = private_subnet_id # add NAT Gateway to private subnet private_route_table_id = aws_route_table_create( vpc_id=vpc_id, nat_gateway_id=nat_gateway_id, destination_cidr_block='0.0.0.0/0', tag_name='Dataverse-Route-Table-Private', ) aws_route_table_asscociate_subnet( subnet_id=private_subnet_id, route_table_id=private_route_table_id, ) # set state state = aws_get_state() state['vpc'][vpc_id]['public_subnet'] = config.emr.subnet.public aws_set_state(state) def _set_default_instance( self, config, min_memory=2048, max_memory=8192, ): """ choose default instance type by memory args: config (OmegaConf): config for the etl min_memory (int): minimum memory size (MiB) max_memory (int): maximum memory size (MiB) """ subnet_id = config.emr.subnet.id az = aws_subnet_az(subnet_id) instances = aws_ec2_instance_at_az(az=az) # find memory size is bigger specified min/max memory candidate = None _min_candidate_memory = float('inf') for instance in instances: # check if instance is supported by EMR if instance not in EMR_SUPPORTED_EC2_INSTANCES: continue instance_info = aws_ec2_instance_info(instance) memory = instance_info['InstanceTypes'][0]['MemoryInfo']['SizeInMiB'] if min_memory <= memory <= max_memory: if memory < _min_candidate_memory: candidate = instance _min_candidate_memory = memory if candidate is None: raise Exception(f"Unable to find instance type with memory between {min_memory} and {max_memory}") instance_info = aws_ec2_instance_info(candidate) vcpu = instance_info['InstanceTypes'][0]['VCpuInfo']['DefaultVCpus'] 
memory = instance_info['InstanceTypes'][0]['MemoryInfo']['SizeInMiB'] print( f"{'=' * 80}\n" f"Default instance type is [ {candidate} ]\n" f"{'=' * 80}\n" f" vCPU: {vcpu}\n" f" Memory: {memory}\n" f" Price: {aws_ec2_get_price(candidate)}\n" f"{'=' * 80}\n" ) if config.emr.master_instance.type is None: config.emr.master_instance.type = candidate if config.emr.core_instance.type is None: config.emr.core_instance.type = candidate if config.emr.task_instance.type is None: config.emr.task_instance.type = candidate def _emr_cluster_create(self, config): """ create aws emr cluster Args: config (OmegaConf): config for the etl """ # to avoid circular import from dataverse.utils.setting import SystemSetting log_dir = f"s3://{SystemSetting().AWS_BUCKET}/{AWSClient().user_id}/emr/logs" # instance group setting instance_groups = [ { 'Name': 'master nodes', 'Market': 'ON_DEMAND', 'InstanceRole': 'MASTER', 'InstanceType': config.emr.master_instance.type, 'InstanceCount': 1, }, { 'Name': 'core nodes', 'Market': 'ON_DEMAND', 'InstanceRole': 'CORE', 'InstanceType': config.emr.core_instance.type, 'InstanceCount': config.emr.core_instance.count, }, ] # task is optional if config.emr.task_instance.count > 0: instance_groups.append( { 'Name': 'task nodes', 'Market': 'ON_DEMAND', 'InstanceRole': 'TASK', 'InstanceType': config.emr.task_instance.type, 'InstanceCount': config.emr.task_instance.count, } ) # create emr cluster emr_id = AWSClient().emr.run_job_flow( Name=config.emr.name, ReleaseLabel=config.emr.release, AutoTerminationPolicy={ "IdleTimeout": config.emr.idle_timeout, }, Instances={ 'InstanceGroups': instance_groups, 'KeepJobFlowAliveWhenNoSteps': True, 'TerminationProtected': False, 'Ec2SubnetId': config.emr.subnet.id, }, Applications=[{'Name': 'Spark'}], VisibleToAllUsers=True, JobFlowRole=config.emr.instance_profile.name, ServiceRole=config.emr.role.emr.name, Tags=[ { 'Key': 'Name', 'Value': config.emr.name, }, ], LogUri=log_dir, )['JobFlowId'] # wait until emr cluster is 
ready waiter = AWSClient().emr.get_waiter('cluster_running') waiter.wait(ClusterId=emr_id) # set state state = aws_get_state() if 'emr' not in state: state['emr'] = {} state['emr'][emr_id] = { 'vpc_id': config.emr.vpc.id, } # instance profile if config.emr.instance_profile.name is not None: state['emr'][emr_id]['instance_profile'] = config.emr.instance_profile.name # role if 'role' not in state['emr'][emr_id]: state['emr'][emr_id]['role'] = {} if config.emr.role.emr.name is not None: state['emr'][emr_id]['role']['emr'] = config.emr.role.emr.name if config.emr.role.ec2.name is not None: state['emr'][emr_id]['role']['ec2'] = config.emr.role.ec2.name aws_set_state(state) config.emr.id = emr_id return emr_id def run(self, config, verbose=False): # setup environment self._setup(config, verbose=verbose) # run emr # get pip installed packages path location = self._get_pip_package_path(config, verbose=verbose) emr_main = os.path.join(location, 'dataverse', 'api', 'emr.py') response = AWSClient().emr.add_job_flow_steps( JobFlowId=config.emr.id, Steps=[ { 'Name': 'Run Dataverse python script on Master node', 'ActionOnFailure': 'CONTINUE', 'HadoopJarStep': { 'Jar': 'command-runner.jar', 'Args': [ 'python3', emr_main, '--config', '/home/hadoop/dataverse/config.yaml', ] } }, ] ) step_id = response['StepIds'][0] return step_id def _setup(self, config, verbose=False): """ [ upload to S3 ] - config for `dataverse` - dataverse site-packages source code - requirements.txt - dynamic etl files [ move s3 to ec2 ] - move uploaded files in S3 from local to EMR cluster [ setup environment on EMR cluster ] - set aws region - install pip dependencies for `dataverse` - set `dataverse` package at EMR cluster pip installed packages path """ # generate working directory self._get_working_dir(config) # upload to necessary dataverse files to S3 self._upload_config(config) self._upload_source_code(config) self._upload_dependencies(config) self._upload_dynamic_etl_files(config) # move uploaded 
files in S3 from local to EMR cluster self._move_s3_to_ec2(config, verbose=verbose) # setup environment on EMR cluster self._setup_aws(config, verbose=verbose) self._setup_dependencies(config, verbose=verbose) self._setup_source_code(config, verbose=verbose) def _get_working_dir(self, config): """ get working directory path for the emr cluster if not provided, it will be automatically generated """ # to avoid circular import from dataverse.utils.setting import SystemSetting if config.emr.working_dir is not None: working_dir = config.emr.working_dir if working_dir.startswith(('s3://', 's3a://', 's3n://')): aws_s3_matched = re.match(r's3[a,n]?://([^/]+)/(.*)', working_dir) if not aws_s3_matched: raise ValueError(f"EMR working directory {working_dir} is not a valid s3 path") else: # [ emr versioning ] - emr_YYYY-MM-DD_HH:MM:SS_<emr_id> # datetime first for ascending order bucket = SystemSetting()['AWS_BUCKET'] user_id = AWSClient().user_id working_dir_name = datetime.datetime.now().strftime(f"emr_%Y-%m-%d_%H:%M:%S_{config.emr.id}") working_dir = f"s3://{bucket}/{user_id}/emr/{working_dir_name}" config.emr.working_dir = working_dir return working_dir def _upload_config(self, config): """ upload config for `dataverse` to S3 """ working_dir = self._get_working_dir(config) bucket, key = aws_s3_path_parse(working_dir) aws_s3_write(bucket, f"{key}/config.yaml", OmegaConf.to_yaml(config)) def _upload_source_code(self, config): """ upload pip site-packages source code to S3 caveat: this doesn't include wheel files or meta data for pip packages """ # to avoid circular import from dataverse.utils.setting import SystemSetting temp_dir = tempfile.mkdtemp() zip_file = os.path.join(temp_dir, 'dataverse.tar.gz') dataverse_home = SystemSetting().DATAVERSE_HOME with tarfile.open(zip_file, "w:gz") as tar: tar.add(dataverse_home, arcname=os.path.basename(dataverse_home)) working_dir = self._get_working_dir(config) bucket, key = aws_s3_path_parse(working_dir) aws_s3_upload(bucket, 
f'{key}/dataverse.tar.gz', zip_file) shutil.rmtree(temp_dir) def _upload_dependencies(self, config, package_name="dataverse"): # get all dependencies requirements = [] for r in pkg_resources.get_distribution(package_name).requires(): requirements.append(str(r)) # create requirements.txt temp_dir = tempfile.mkdtemp() dependency_file = os.path.join(temp_dir, 'requirements.txt') with open(dependency_file, 'w') as f: for requirement in requirements: f.write(f"{requirement}\n") # upload requirements.txt to S3 working_dir = self._get_working_dir(config) bucket, key = aws_s3_path_parse(working_dir) aws_s3_upload(bucket, f'{key}/requirements.txt', dependency_file) shutil.rmtree(temp_dir) def _upload_dynamic_etl_files(self, config): # to avoid circular import from dataverse.etl import ETLRegistry # get all etl files dynamic_etl_file_paths = [] for etl in ETLRegistry().get_all(): # not part of the dataverse source but dynamically loaded by user if not etl.__etl_dir__: file_path = etl.__file_path__ # jupyter notebook is not supported # TODO: allow jupyter notebook # NOTE: reason why jupyter notebook is not supported is because # the filename point at the temporary file path not the `.ipynb` file if 'ipykernel' in file_path: raise ValueError( 'Dynamic ETL from jupyter notebook not supported. 
Only from .py files\n' f"[ {file_path} ] is given which is temporary jupyter cell execution file\n" ) # only declaration is allowed # TODO: analyze the code and only parse necessary dynamic etl code # NOTE: this is to prevent execution of the code if python_is_script_executable(file_path): raise ValueError( 'Dynamic ETL file should only contain declaration (imports, functions, classes, etc.)' f"[ {file_path} ] includes execution.\n" ) # check not from of jupyter notebook dynamic_etl_file_paths.append(file_path) # upload etl files to S3 working_dir = self._get_working_dir(config) bucket, key = aws_s3_path_parse(working_dir) # if dynamic_etl dir exists, remove it # NOTE: this is to prevent old files from being uploaded # in case that user is using setup multiple times with same working_dir try: aws_s3_delete(bucket, f'{key}/dynamic_etl') except: pass for file_path in dynamic_etl_file_paths: aws_s3_upload( bucket=bucket, key=f'{key}/dynamic_etl/{os.path.basename(file_path)}', local_path=file_path ) def _move_s3_to_ec2(self, config, verbose=False): """ move uploaded files in S3 from local to EMR cluster """ nodes = AWSClient().emr.list_instances( ClusterId=config.emr.id )["Instances"] instance_ids = [node["Ec2InstanceId"] for node in nodes] # remove existing dataverse directory commands = [ "rm -r /home/hadoop/dataverse", ] try: aws_ssm_run_commands(instance_ids, commands, verbose=verbose) except: pass commands = [ f"aws s3 cp {config.emr.working_dir} /home/hadoop/dataverse --recursive", ] aws_ssm_run_commands(instance_ids, commands, verbose=verbose) def _get_pip_package_path(self, config, verbose=False): """ get pip installed packages path """ nodes = AWSClient().emr.list_instances( ClusterId=config.emr.id )["Instances"] instance_ids = [node["Ec2InstanceId"] for node in nodes] commands = ["pip3 show numpy"] result = aws_ssm_run_commands( instance_ids, commands, verbose=verbose, return_output=True, ) location = re.findall(r'Location: (.*)\n', result['pip3 show 
numpy'])[0] return location def _setup_aws(self, config, verbose=False): """ setup aws environment on EMR cluster """ nodes = AWSClient().emr.list_instances( ClusterId=config.emr.id )["Instances"] instance_ids = [node["Ec2InstanceId"] for node in nodes] commands = [ f"aws configure set region {AWSClient().region}", ] aws_ssm_run_commands(instance_ids, commands, verbose=verbose) def _setup_dependencies(self, config, verbose=False): nodes = AWSClient().emr.list_instances( ClusterId=config.emr.id )["Instances"] instance_ids = [node["Ec2InstanceId"] for node in nodes] commands = [ "sudo yum install -y python3-devel", "pip3 install wheel setuptools pip --upgrade", ] aws_ssm_run_commands(instance_ids, commands, verbose=verbose) # NOTE: unknown unlimited loop caused by `pip3 install -r requirements.txt` # so I split the following command separately commands = [ "pip3 install -r /home/hadoop/dataverse/requirements.txt", ] aws_ssm_run_commands(instance_ids, commands, verbose=verbose) def _setup_source_code(self, config, verbose=False): """ copy dataverse source code to pip installed packages path """ nodes = AWSClient().emr.list_instances( ClusterId=config.emr.id )["Instances"] instance_ids = [node["Ec2InstanceId"] for node in nodes] # unzip dataverse.tar.gz and copy to pip installed packages path commands = [ "tar -xzf /home/hadoop/dataverse/dataverse.tar.gz -C /home/hadoop/dataverse", ] aws_ssm_run_commands(instance_ids, commands, verbose=verbose) # get pip installed packages path location = self._get_pip_package_path(config, verbose=verbose) # copy dataverse source code to pip installed packages path commands = [ f"cp -r /home/hadoop/dataverse/dataverse {location}", ] aws_ssm_run_commands(instance_ids, commands, verbose=verbose) def wait(self, config, step_id, verbose=True): """ waiter for emr step """ while True: response = AWSClient().emr.describe_step( ClusterId=config.emr.id, StepId=step_id, ) state = response['Step']['Status']['State'] if state == 'Pending': 
time.sleep(10) if verbose: print("[ Dataverse ] step pending...") continue if state in ['COMPLETED', 'FAILED', 'CANCELLED']: if verbose: print(f"[ Dataverse ] step status: {state}. Done.") break if verbose: if 'Message' in response['Step']['Status']['StateChangeReason']: print(response['Step']['Status']['StateChangeReason']['Message']) time.sleep(10) def terminate(self, config): """ terminate emr cluster Args: config (OmegaConf): config for the etl """ # only terminate auto generated emr cluster if config.emr.auto_generated is False: print('EMR cluster is not auto generated. Not terminating.') return if config.emr.id is None: print('EMR cluster is not launched. Proceeding to clean resources.') else: AWSClient().emr.terminate_job_flows(JobFlowIds=[config.emr.id]) # wait until emr cluster is terminated waiter = AWSClient().emr.get_waiter('cluster_terminated') waiter.wait(ClusterId=config.emr.id) # set state state = aws_get_state() if 'emr' in state and config.emr.id in state['emr']: del state['emr'][config.emr.id] aws_set_state(state) # clean unused resources self._clean() def _clean(self): """ clean unused resources related to EMR """ self._clean_stopped_emr() self._clean_unused_vpc() self._clean_unused_iam_instance_profile() self._clean_unused_iam_role() def _clean_stopped_emr(self): """ check stopped EMR and update the state """ state = aws_get_state() # get all emr ids emr_ids = [] if 'emr' in state: for emr_id in state['emr']: emr_ids.append(emr_id) # remove stopped emr from state REMOVE_STATES = [ 'TERMINATED', 'TERMINATED_WITH_ERRORS' ] for emr_id in emr_ids: emr_info = AWSClient().emr.describe_cluster(ClusterId=emr_id) if emr_info['Cluster']['Status']['State'] in REMOVE_STATES: del state['emr'][emr_id] aws_set_state(state) def _clean_unused_vpc(self): """ check the AWS state and clean vpc that is not used by any emr cluster """ state = aws_get_state() # get all vpc ids that are used by emr used_vpc_ids = [] if 'emr' in state: for emr_id in state['emr']: 
used_vpc_ids.append(state['emr'][emr_id]['vpc_id']) # get all vpc ids that are created all_vpc_ids = [] if 'vpc' in state: for vpc_id in state['vpc']: all_vpc_ids.append(vpc_id) # clean unused vpc unused_vpc_ids = list(set(all_vpc_ids) - set(used_vpc_ids)) for vpc_id in unused_vpc_ids: aws_vpc_delete(vpc_id) def _clean_unused_iam_role(self): """ check the AWS state and clean iam role that is not used by any emr cluster """ state = aws_get_state() # get all iam role names that are used by emr used_iam_role_names = [] if 'emr' in state: for emr_id in state['emr']: if 'ec2' in state['emr'][emr_id]['role']: used_iam_role_names.append(state['emr'][emr_id]['role']['ec2']) if 'emr' in state['emr'][emr_id]['role']: used_iam_role_names.append(state['emr'][emr_id]['role']['emr']) # get all iam role names that are created all_iam_role_names = [] if 'iam' in state and 'role' in state['iam']: for role_name in state['iam']['role']: all_iam_role_names.append(role_name) # clean unused iam role unused_iam_role_names = list(set(all_iam_role_names) - set(used_iam_role_names)) for role_name in unused_iam_role_names: aws_iam_role_delete(role_name) def _clean_unused_iam_instance_profile(self): """ check the AWS state and clean iam instance profile that is not used by any emr cluster """ state = aws_get_state() # get all iam instance profile names that are used by emr used_iam_instance_profile_names = [] if 'emr' in state: for emr_id in state['emr']: used_iam_instance_profile_names.append(state['emr'][emr_id]['instance_profile']) # get all iam instance profile names that are created all_iam_instance_profile_names = [] if 'iam' in state and 'instance_profile' in state['iam']: for instance_profile_name in state['iam']['instance_profile']: all_iam_instance_profile_names.append(instance_profile_name) # clean unused iam instance profile unused_iam_instance_profile_names = list(set(all_iam_instance_profile_names) - set(used_iam_instance_profile_names)) for instance_profile_name in 
def aws_iam_role_create(
    role_name,
    trust_policy,
    policy_arns,
    description='Role for Dataverse',
    max_session_duration=3600,
):
    """
    create an IAM role, attach the given policies, and record it in the state

    Args:
        role_name (str): name of the IAM role to create
        trust_policy (dict): assume-role (trust) policy document
        policy_arns (list): policy ARNs to attach to the role
        description (str): role description
        max_session_duration (int): max session duration in seconds

    NOTE: if the role already exists, creation is skipped and the state
    is left untouched; we still wait until the role is visible.
    """
    try:
        AWSClient().iam.create_role(
            RoleName=role_name,
            Description=description,
            AssumeRolePolicyDocument=json.dumps(trust_policy),
            MaxSessionDuration=max_session_duration,
        )

        # attach policy
        for policy_arn in policy_arns:
            AWSClient().iam.attach_role_policy(
                RoleName=role_name,
                PolicyArn=policy_arn,
            )

        # set state
        state = aws_get_state()
        if 'iam' not in state:
            state['iam'] = {}
        if 'role' not in state['iam']:
            state['iam']['role'] = {}
        state['iam']['role'][role_name] = {
            'policy_arns': policy_arns,
        }
        aws_set_state(state)
    except AWSClient().iam.exceptions.EntityAlreadyExistsException:
        print(f"{role_name} already exists.")
    # NOTE: other exceptions propagate naturally; the previous
    # `except Exception as e: raise e` was a no-op re-raise and was removed.

    # wait until role is ready
    waiter = AWSClient().iam.get_waiter('role_exists')
    waiter.wait(RoleName=role_name)
def aws_iam_instance_profile_delete(instance_profile_name):
    """
    delete an IAM instance profile and update the state

    detaches every role from the instance profile first, since AWS
    refuses to delete a profile that still has roles attached.

    Args:
        instance_profile_name (str): name of the instance profile to delete
    """
    # remove role(s) from instance profile
    # FIX: the previous implementation assumed exactly one attached role
    # (Roles[0]) and crashed with IndexError when the profile had none
    response = AWSClient().iam.get_instance_profile(
        InstanceProfileName=instance_profile_name
    )
    for role in response['InstanceProfile']['Roles']:
        AWSClient().iam.remove_role_from_instance_profile(
            InstanceProfileName=instance_profile_name,
            RoleName=role['RoleName'],
        )

    # delete instance profile
    AWSClient().iam.delete_instance_profile(InstanceProfileName=instance_profile_name)

    # set state
    state = aws_get_state()
    if 'iam' in state and 'instance_profile' in state['iam']:
        if instance_profile_name in state['iam']['instance_profile']:
            del state['iam']['instance_profile'][instance_profile_name]
            aws_set_state(state)
def aws_vpc_create(cidr_block=None, tag_name='Dataverse-Temporary-VPC'):
    """
    create a VPC, auto-selecting a free 10.X.0.0/16 block when none is given

    Args:
        cidr_block (str): CIDR block for the VPC; when None, the first
            unused second octet in 10.0.0.0/8 is picked automatically
        tag_name (str): value of the Name tag attached to the VPC

    Returns:
        str: the created VPC id

    Raises:
        Exception: when no CIDR block is available, or the requested one
            is already occupied
    """
    # load all vpcs ids to check if the cidr block is occupied
    vpcs = AWSClient().ec2.describe_vpcs()
    second_octets = []
    for vpc in vpcs['Vpcs']:
        second_octet = int(vpc['CidrBlock'].split('.')[1])
        second_octets.append(second_octet)

    # auto generate cidr block if not provided
    if cidr_block is None:
        is_network_available = False
        for octet in range(0, 255):
            if octet not in second_octets:
                is_network_available = True
                break
        if is_network_available:
            cidr_block = '10.' + str(octet) + '.0.0/16'
        else:
            raise Exception('Unable to find an available CIDR block for VPC.')

    # user provided cidr block
    # FIX: compare as int — `second_octets` holds ints, so the previous
    # string membership test never detected an occupied block
    elif int(cidr_block.split('.')[1]) in second_octets:
        raise Exception('The CIDR block is already occupied.')

    # create vpc
    vpc = AWSClient().ec2.create_vpc(CidrBlock=cidr_block)
    vpc_id = vpc['Vpc']['VpcId']
    AWSClient().ec2.create_tags(
        Resources=[vpc_id],
        Tags=[
            {'Key': 'Name', 'Value': tag_name},
        ]
    )

    # update state
    state = aws_get_state()
    if 'vpc' not in state:
        state['vpc'] = {}
    state['vpc'][vpc_id] = {'public_subnet': False}
    aws_set_state(state)

    # wait until vpc is ready
    waiter = AWSClient().ec2.get_waiter('vpc_available')
    waiter.wait(VpcIds=[vpc_id])

    return vpc_id
because terminated EMR cluster iterrupts subnet deletion # by dependency problem for few seconds # HACK: this is a hacky solution and should be fixed in the future RETRY_SUBNET_DELETION = 5 for _ in range(RETRY_SUBNET_DELETION): try: aws_subnet_delete(vpc_id, state['vpc'][vpc_id]['subnet']) break except AWSClient().ec2.exceptions.ClientError as e: if e.response['Error']['Code'] == 'DependencyViolation': time.sleep(5) continue else: raise e except Exception as e: raise e if 'security_group' in state['vpc'][vpc_id]: aws_security_group_delete(vpc_id, state['vpc'][vpc_id]['security_group']) if 'gateway' in state['vpc'][vpc_id]: aws_gateway_delete(vpc_id, state['vpc'][vpc_id]['gateway']) if 'route_table' in state['vpc'][vpc_id]: aws_route_table_delete(vpc_id, state['vpc'][vpc_id]['route_table']) # EMR managed dependency vpc = boto3.resource('ec2').Vpc(vpc_id) # NOTE: remove dependency between security groups for security_group in vpc.security_groups.all(): aws_security_group_remove_dependency(security_group.id) for security_group in vpc.security_groups.all(): if security_group.group_name == "default": continue aws_security_group_delete(vpc_id, security_group.id) # ------------------------------------------------------------ try: AWSClient().ec2.delete_vpc(VpcId=vpc_id) # when vpc doesn't exist except AWSClient().ec2.exceptions.ClientError as e: if e.response['Error']['Code'] == 'InvalidVpcID.NotFound': print(f"VPC {vpc_id} doesn't exist.") # re-thrown other exceptions except Exception as e: raise e if 'vpc' in state and vpc_id in state['vpc']: del state['vpc'][vpc_id] aws_set_state(state) def aws_subnet_create(vpc_id, cird_block=None, tag_name='Dataverse-Temporary-Subnet'): if cird_block is None: # Get VPC information to determine CIDR block vpcs = AWSClient().ec2.describe_vpcs(VpcIds=[vpc_id]) cird_block = vpcs['Vpcs'][0]['CidrBlock'] # create subnet subnet = AWSClient().ec2.create_subnet(CidrBlock=str(cird_block), VpcId=vpc_id) subnet_id = subnet['Subnet']['SubnetId'] 
def aws_subnet_delete(vpc_id, subnet_id):
    """Delete one subnet id (or a list of them) and sync the state."""
    # accept either a single id or a list of ids
    if isinstance(subnet_id, str):
        targets = [subnet_id]
    elif isinstance(subnet_id, list):
        targets = subnet_id

    for sid in targets:
        AWSClient().ec2.delete_subnet(SubnetId=sid)

        state = aws_get_state()
        vpc_state = state.get('vpc', {}).get(vpc_id, {})
        if sid in vpc_state.get('subnet', []):
            vpc_state['subnet'].remove(sid)
            aws_set_state(state)
""" security_group = AWSClient().ec2.create_security_group( GroupName=group_name, Description=description, VpcId=vpc_id, ) security_group_id = security_group['GroupId'] AWSClient().ec2.authorize_security_group_ingress( GroupId=security_group_id, IpPermissions=[ { 'IpProtocol': 'tcp', 'FromPort': port, 'ToPort': port, 'IpRanges': [{'CidrIp': '0.0.0.0/0'}] }, ]) AWSClient().ec2.create_tags( Resources=[security_group_id], Tags=[ {'Key': 'Name', 'Value': tag_name}, ] ) # set state state = aws_get_state() if 'security_group' not in state['vpc'][vpc_id]: state['vpc'][vpc_id]['security_group'] = [] state['vpc'][vpc_id]['security_group'].append(security_group_id) aws_set_state(state) return security_group_id def aws_security_group_delete(vpc_id, security_group_id): if isinstance(security_group_id, str): security_group_ids = [security_group_id] elif isinstance(security_group_id, list): security_group_ids = security_group_id for security_group_id in security_group_ids: AWSClient().ec2.delete_security_group(GroupId=security_group_id) state = aws_get_state() if 'vpc' in state and vpc_id in state['vpc']: if 'security_group' in state['vpc'][vpc_id] and security_group_id in state['vpc'][vpc_id]['security_group']: state['vpc'][vpc_id]['security_group'].remove(security_group_id) aws_set_state(state) def aws_security_group_remove_dependency(security_group_id): """ """ response = AWSClient().ec2.describe_security_groups( GroupIds=[security_group_id] ) # Removing inbound rules inbound_rules = response['SecurityGroups'][0]['IpPermissions'] if inbound_rules: AWSClient().ec2.revoke_security_group_ingress( GroupId=security_group_id, IpPermissions=inbound_rules ) # Removing outbound rules outbound_rules = response['SecurityGroups'][0]['IpPermissionsEgress'] if outbound_rules: AWSClient().ec2.revoke_security_group_egress( GroupId=security_group_id, IpPermissions=outbound_rules ) def aws_gateway_create(vpc_id, tag_name='Dataverse-Gateway'): """ Create a gateway for public subnet. 
""" gateway = AWSClient().ec2.create_internet_gateway() gateway_id = gateway['InternetGateway']['InternetGatewayId'] # attach gateway to vpc AWSClient().ec2.attach_internet_gateway( InternetGatewayId=gateway_id, VpcId=vpc_id ) AWSClient().ec2.create_tags( Resources=[gateway_id], Tags=[ {'Key': 'Name', 'Value': tag_name}, ] ) # set state state = aws_get_state() if 'gateway' not in state['vpc'][vpc_id]: state['vpc'][vpc_id]['gateway'] = [] state['vpc'][vpc_id]['gateway'].append(gateway_id) aws_set_state(state) # wait until gateway is ready waiter = AWSClient().ec2.get_waiter('internet_gateway_exists') waiter.wait(InternetGatewayIds=[gateway_id]) return gateway_id def aws_gateway_delete(vpc_id, gateway_id): if isinstance(gateway_id, str): gateway_ids = [gateway_id] elif isinstance(gateway_id, list): gateway_ids = gateway_id for gateway_id in gateway_ids: # detach gateway from vpc AWSClient().ec2.detach_internet_gateway( InternetGatewayId=gateway_id, VpcId=vpc_id ) AWSClient().ec2.delete_internet_gateway(InternetGatewayId=gateway_id) state = aws_get_state() if 'vpc' in state and vpc_id in state['vpc']: if 'gateway' in state['vpc'][vpc_id] and gateway_id in state['vpc'][vpc_id]['gateway']: state['vpc'][vpc_id]['gateway'].remove(gateway_id) aws_set_state(state) def aws_elastic_ip_allocate(vpc_id, tag_name='Dataverse-Elastic-IP'): """ Allocate an elastic ip. """ elastic_ip = AWSClient().ec2.allocate_address(Domain='vpc') elastic_ip_id = elastic_ip['AllocationId'] AWSClient().ec2.create_tags( Resources=[elastic_ip_id], Tags=[ {'Key': 'Name', 'Value': tag_name}, ] ) # set state state = aws_get_state() if 'vpc' not in state: state['vpc'] = {} if vpc_id not in state['vpc']: state['vpc'][vpc_id] = {} if 'elastic_ip' not in state['vpc'][vpc_id]: state['vpc'][vpc_id]['elastic_ip'] = [] state['vpc'][vpc_id]['elastic_ip'].append(elastic_ip_id) aws_set_state(state) # TODO: wait until elastic ip is ready ... 
def aws_elastic_ip_release(vpc_id, elastic_ip_id):
    """
    release elastic ip(s) and remove them from the state

    Args:
        vpc_id (str): id of the VPC the allocation was recorded under
        elastic_ip_id (str or list): allocation id(s) to release

    NOTE: a missing allocation id is reported and skipped instead of
    failing, so cleanup can proceed for the remaining ids.
    """
    if isinstance(elastic_ip_id, str):
        elastic_ip_ids = [elastic_ip_id]
    elif isinstance(elastic_ip_id, list):
        elastic_ip_ids = elastic_ip_id

    for elastic_ip_id in elastic_ip_ids:
        try:
            AWSClient().ec2.release_address(AllocationId=elastic_ip_id)

            state = aws_get_state()
            if 'vpc' in state and vpc_id in state['vpc']:
                if 'elastic_ip' in state['vpc'][vpc_id] and elastic_ip_id in state['vpc'][vpc_id]['elastic_ip']:
                    state['vpc'][vpc_id]['elastic_ip'].remove(elastic_ip_id)
                    aws_set_state(state)
        except AWSClient().ec2.exceptions.ClientError as e:
            # an already-released / unknown allocation is not fatal during cleanup
            if e.response['Error']['Code'] == 'InvalidAllocationID.NotFound':
                print(f"Elastic IP id {elastic_ip_id} doesn't exist.")
            else:
                raise
        # NOTE: the former `except Exception as e: raise e` was a no-op
        # re-raise and has been dropped; other exceptions propagate as-is.
set state state = aws_get_state() if 'vpc' in state and vpc_id in state['vpc']: if 'nat_gateway' in state['vpc'][vpc_id] and nat_gateway_id in state['vpc'][vpc_id]['nat_gateway']: state['vpc'][vpc_id]['nat_gateway'].remove(nat_gateway_id) aws_set_state(state) # wait until NAT gateway is deleted waiter = AWSClient().ec2.get_waiter('nat_gateway_deleted') waiter.wait(NatGatewayIds=[nat_gateway_id]) def aws_route_table_create( vpc_id, gateway_id=None, nat_gateway_id=None, tag_name='Dataverse-Route-Table', destination_cidr_block='0.0.0.0/0', ): """ Create a route table for subnet. """ route_table = AWSClient().ec2.create_route_table(VpcId=vpc_id) route_table_id = route_table['RouteTable']['RouteTableId'] args = { 'DestinationCidrBlock': destination_cidr_block, 'RouteTableId': route_table_id, } if gateway_id is not None: args['GatewayId'] = gateway_id if nat_gateway_id is not None: args['NatGatewayId'] = nat_gateway_id AWSClient().ec2.create_route(**args) AWSClient().ec2.create_tags( Resources=[route_table_id], Tags=[ {'Key': 'Name', 'Value': tag_name}, ] ) # set state state = aws_get_state() if 'route_table' not in state['vpc'][vpc_id]: state['vpc'][vpc_id]['route_table'] = [] state['vpc'][vpc_id]['route_table'].append(route_table_id) aws_set_state(state) # TODO: wait until route table is ready # didn't found waiter for route table ... 
def aws_s3_path_parse(path):
    """
    parse an aws s3 path into bucket and key

    Args:
        path (str): s3 path, e.g. ``s3://bucket/key`` (the ``s3a`` and
            ``s3n`` schemes are accepted as well)

    Returns:
        tuple: (bucket, key)

    Raises:
        Exception: when the path is not a valid s3 path
    """
    # FIX: the class was previously written `[a,n]`, which also accepted a
    # literal comma scheme like `s3,://`; only s3 / s3a / s3n are valid
    aws_s3_matched = re.match(r's3[an]?://([^/]+)/(.*)', path)
    if aws_s3_matched:
        bucket = aws_s3_matched.group(1)
        path = aws_s3_matched.group(2)
    else:
        raise Exception(f"Invalid S3 path: {path}")

    return bucket, path
def aws_s3_upload(bucket, key, local_path):
    """
    upload a file or a whole directory tree to s3

    Args:
        bucket (str): bucket name
        key (str): key (aws s3 file path)
        local_path (str): local file or directory to upload

    Usage:
        aws_s3_upload('tmp', 'this/is/path.json', 'path.json')
    """
    if os.path.isdir(local_path):
        # FIX: glob("*/*") only matched entries exactly one level deep,
        # silently skipping files at the top level of `local_path` and
        # anything nested deeper than two levels; walk the whole tree
        for root, _, files in os.walk(local_path):
            for name in files:
                file_path = os.path.join(root, name)
                rel_path = os.path.relpath(file_path, local_path)
                _key = os.path.join(key, rel_path)
                AWSClient().s3.upload_file(file_path, bucket, _key)
    else:
        AWSClient().s3.upload_file(local_path, bucket, key)
def aws_s3_list_buckets():
    """
    get all buckets from aws s3
    """
    response = AWSClient().s3.list_buckets()
    return [bucket['Name'] for bucket in response['Buckets']]
# files if "Contents" in results: objects.extend(list(obj["Key"] for obj in results["Contents"])) # subfolders if "CommonPrefixes" in results: objects.extend(list(obj["Prefix"] for obj in results["CommonPrefixes"])) # set default remove_prefix = True if remove_prefix: # remove the prefix itself objects = list(obj.replace(prefix, "") for obj in objects) # remove '' objects = list(obj for obj in objects if obj) else: for obj in objects: if obj == prefix: objects.remove(obj) return objects def aws_s3_get_object_type(bucket, key): """ get object type from s3 NOTE: S3 don't have a concept of folder so this is a hardcoded solution to check key is file/folder or doesn't exist TODO: if there is edge case that this function doesn't cover please add it to the test case """ results = AWSClient().s3.list_objects_v2( Bucket=bucket, Prefix=key, Delimiter="/", ) if 'CommonPrefixes' in results: prefix_folders = results['CommonPrefixes'][0]['Prefix'].split('/') key_folders = key.split('/') # remove '' prefix_folders = [x for x in prefix_folders if x != ''] key_folders = [x for x in key_folders if x != ''] # check key exacly match prefix for key_folder in key_folders: if key_folder not in prefix_folders: return 'no_obj' return 'folder' elif 'Contents' in results: content = results['Contents'][0]['Key'] if content == key: if content.endswith('/'): return 'folder' else: return 'file' else: return 'folder' else: return 'no_obj' ================================================ FILE: dataverse/utils/format/README.md ================================================ # Format > ETL is backed by spark and `format` is a helpers to reformat data. It could be **collection of converters** converts from any data format into spark readable format or any utils that helps with data format. 
def load_huggingface_dataset(name_or_path, split=None, from_disk=False):
    """
    load huggingface dataset

    Args:
        name_or_path (str or list): the name or path of the huggingface dataset
        split (str): the split of the dataset
        from_disk (bool): load via ``datasets.load_from_disk`` instead of
            ``datasets.load_dataset``
    """
    if from_disk:
        # load_from_disk has no notion of splits
        if split is not None:
            raise ValueError("split is not supported when from_disk is True")

        if isinstance(name_or_path, str):
            return datasets.load_from_disk(name_or_path)
        if isinstance(name_or_path, list):
            return datasets.load_from_disk(*name_or_path)
        if isinstance(name_or_path, ListConfig):
            # load each path separately, then merge into one dataset
            parts = [datasets.load_from_disk(nop) for nop in name_or_path]
            return datasets.concatenate_datasets(parts)
        raise ValueError(f"Unsupported type of name_or_path: {type(name_or_path)}")

    # hub / script loading path
    if isinstance(name_or_path, str):
        return datasets.load_dataset(name_or_path, split=split)
    if isinstance(name_or_path, (list, ListConfig)):
        # list and ListConfig were handled identically — unpack as args
        return datasets.load_dataset(*name_or_path, split=split)
    raise ValueError(f"Unsupported type of name_or_path: {type(name_or_path)}")
def get_uuidv1():
    """Return a time-based (version 1) UUID as a 32-char hex string."""
    return uuid.uuid1().hex


def get_uuidv4():
    """Return a random (version 4) UUID as a 32-char hex string."""
    return uuid.uuid4().hex
Only alphanumeric and underscore - e.g. `CACHE_DIR2` (O) - e.g. `cache-dir` (X) - e.g. `CACHE_@DIR` (X) 3. Only one underscore between words - e.g. `CACHE__DIR` (X) 4. No underscore at the start/end of the key - e.g. `_CACHE_DIR` (X) - e.g. `CACHE_DIR_` (X) ### System Setting Policy - Only memory (not stored in the file) - Only updated by `Environment Variables` - Default Setting Manually updated - check `system.py.SystemSetting.default_setting()` - No update after the system is initialized - If you want to change the setting, you must restart the system. ### How to modify? - Only by Setting Environment Variables ```bash # dynamic CACHE_DIR=/path/to/cache/dir python3 main.py # static export CACHE_DIR=/path/to/cache/dir python3 main.py ``` ### How to use `SystemSetting` > **This MUST be used internally by the system**. But just in case, you can use it in 3 ways. ```python from dataverse.utils.setting import SystemSetting # get the setting cache_dir = SystemSetting().get('CACHE_DIR') cache_dir = SystemSetting()['CACHE_DIR'] cache_dir = SystemSetting().CACHE_DIR # set the setting SystemSetting().set('CACHE_DIR', '/path/to/cache/dir') SystemSetting()['CACHE_DIR'] = '/path/to/cache/dir' SystemSetting().CACHE_DIR = '/path/to/cache/dir' ``` ## User Settings > API keys, passwords, or other sensitive information of user. ### naming convention 1. Only CAPITALIZED format - e.g. `GITHUB_API` (O) - e.g. `github_api` (X) 2. Only alphanumeric and underscore - e.g. `GITHUB_API2` (O) - e.g. `github-api` (X) - e.g. `GITHUB_@API` (X) 3. Only one underscore between words - e.g. `GITHUB__API` (X) 4. No underscore at the start/end of the key - e.g. `_GITHUB_API` (X) - e.g. `GITHUB_API_` (X) ### Where does it store? > Setting will be stored in `CACHE_DIR` set in `SystemSetting` with the name of `user_setting.json`. ```python from dataverse.utils.setting import SystemSetting {SystemSetting().CACHE_DIR}/.cache/dataverse/setting/user_setting.json ``` ### How to modify? 1. 
You could modify the `user_setting.json` file directly 2. or can use proxy class `UserSetting` - this is synchronized with the `user_setting.json` file ```python from dataverse.utils.setting import UserSetting ``` ### How to use `UserSetting` proxy? > There is 3 ways to use it. ```python from dataverse.utils.setting import UserSetting # get the value github_api = UserSetting().get('GITHUB_API') github_api = UserSetting()['GITHUB_API'] github_api = UserSetting().GITHUB_API # set the value UserSetting().set('GITHUB_API', 'your_github_api_key') UserSetting()['GITHUB_API'] = 'your_github_api_key' UserSetting().GITHUB_API = 'your_github_api_key' ``` ================================================ FILE: dataverse/utils/setting/__init__.py ================================================ from dataverse.utils.setting.user import UserSetting from dataverse.utils.setting.system import SystemSetting ================================================ FILE: dataverse/utils/setting/system.py ================================================ """ Interface for system setting """ import os import re import uuid import json import boto3 import pyspark from pathlib import Path import dataverse from dataverse.utils.api import aws_check_credentials from dataverse.utils.api import aws_s3_create_bucket from dataverse.utils.api import aws_s3_list_buckets class SystemSetting: """ System Setting CRUD interface system setting holds all the variables that influence the behavior of the dataverse system. 
Also, this class is a singleton class, so you can use it anywhere in the code [ MEMORY ONLY ] - system setting is stored in memory only - system setting is not persistent [ Update by Env Variable ] - system setting can be updated by env variable [ Manual Update ] - default system setting can be updated manually - check `default_setting()` [ No Update after Initialization ] - system could be updated but not reflected in the program - need to restart the program to use new system setting """ # Singleton _initialized = False # TODO: system setting per user [Candidate] ... def __new__(cls): if not hasattr(cls, 'instance'): cls.instance = super(SystemSetting, cls).__new__(cls) return cls.instance def __init__(self): # when the class is initialized, this is called everytime # regardless of the singleton. So adding the flag to check if self._initialized: return self.default_setting() self.update_by_env() self._initialized = True def _get_aws_bucket(self, verbose=True): """ the bucket will be used to store the dataverse info - cache - log - etc - format - dataverse-{MAGIC_NUMBER}-{UUID} """ # if aws credential is not valid, return None if not aws_check_credentials(): return None identify_prefix = f'dataverse-{self.MAGIC_NUMBER}-' for bucket in aws_s3_list_buckets(): if identify_prefix in bucket: # check if the last part is uuid uuid_part = bucket.replace(identify_prefix, "") try: uuid.UUID(uuid_part) # Use this bucket for your package operations if verbose: print("Detected Dataverse Bucket: " + bucket) return bucket except ValueError: # not a valid UUID, so ignore this bucket pass # if there is no relevant bucket, create one bucket = f'dataverse-{self.MAGIC_NUMBER}-{uuid.uuid1()}' aws_s3_create_bucket(bucket) return bucket def default_setting(self): """ Reset the system setting to default Default setting: - `MAGIC_NUMBER`: magic number for dataverse - `CACHE_DIR`: default cache directory - `IS_CLI`: if the program is running in CLI mode - `AWS_BUCKET`: default aws bucket 
name for dataverse info - `SPARK_VERSION`: spark version - `HADOOP_VERSION`: hadoop version """ self.system_setting = {} # MAGIC NUMBER # dv - Dataverse # 42 - (The Hitchhiker's Guide to the Galaxy) # The Answer to the Ultimate Question of Life, the Universe, and Everything self.MAGIC_NUMBER = "dv42" # DATAVERSE self.DATAVERSE_HOME = os.path.dirname(dataverse.__file__) # HARD CODED DEFAULT SETTING self.CACHE_DIR = Path.home().as_posix() self.IS_CLI = False # AWS SETTING self.AWS_BUCKET = self._get_aws_bucket() # SPARK VERSION self.SPARK_VERSION = pyspark.__version__ # HADOOP VERSION jars = Path(pyspark.__file__).parent / "jars" hadoop_jar = list(jars.glob("hadoop-client-runtime*.jar")) self.HADOOP_VERSION = re.findall(r"\d+\.\d+\.\d+", hadoop_jar[0].name)[-1] # TODO: add more default setting here ... def update_by_env(self): """ Update the system setting by env variable """ # check if the env variable is set for key in self.system_setting: if key in os.environ: self.system_setting[key] = os.environ[key] def check_naming_convention(self, key): """ 1. only CAPITALIZED format - e.g. CACHE_DIR (O) - e.g. cache_dir (X) 2. only alphanumeric and underscore - e.g. CACHE_DIR2 (O) - e.g. cache-dir (X) - e.g. CACHE_@DIR (X) 3. only one underscore between words - e.g. CACHE__DIR (X) 4. no underscore at the start/end of the key - e.g. _CACHE_DIR (X) - e.g. CACHE_DIR_ (X) """ # 1. only CAPITALIZED format if key != key.upper(): raise ValueError(f"key [ {key} ] is not in Capitalized format") # 2. only alphanumeric and underscore for char in key: if not char.isalnum() and char != "_": raise ValueError(f"key [ {key} ] should only contains alphanumeric and underscore") # 3. only one underscore between words if "_" in key: # check if there is only one underscore divided_keys = key.split("_") if "" in divided_keys: raise ValueError(f"key [ {key} ] contains more than one underscore") # 4. 
no underscore at the start/end of the key if key.startswith("_") or key.endswith("_"): raise ValueError(f"key [ {key} ] contains underscore at the start/end of the key") def get(self, key): """ """ if key not in self.system_setting: raise KeyError(f"key [ {key} ] does not exist in SYSTEM setting") return self.system_setting[key] def set(self, key, value): """ """ self.check_naming_convention(key) self.system_setting[key] = value # Support dot-like access, e.g. setting.CACHE_DIR def __getattr__(self, key): if key in [ "_initialized", "system_setting" ]: return super().__getattr__(key) else: return self.get(key) def __setattr__(self, key, value): if key in [ "_initialized", "system_setting" ]: super().__setattr__(key, value) else: self.set(key, value) # Support dict-like access, e.g. setting["CACHE_DIR"] def __getitem__(self, key): return self.get(key) def __setitem__(self, key, value): self.set(key, value) def delete(self, key): """ """ if key in self.system_setting: self.system_setting.pop(key, None) else: raise KeyError(f"key [ {key} ] does not exist in SYSTEM setting") def list(self): """ List all settings """ print(self.system_setting) def __repr__(self): return json.dumps(self.system_setting, indent=4) def __str__(self): return json.dumps(self.system_setting, indent=4) ================================================ FILE: dataverse/utils/setting/user.py ================================================ """ Interface for user setting """ import os import json from pathlib import Path from dataverse.utils.setting.system import SystemSetting class UserSetting: """ Proxy for user setting CRUD, synchronized with the user_setting.json file To manage user API keys, passwords, or other sensitive information, you can use directly store the information in the user_setting.json file or use this class to store the information as proxy. 
Also, this class is a singleton class, so you can use it anywhere in the code caveat: - this is just a storage and does not include any logic more than CRUD - anything more than CRUD is a responsibility of outside of this class what does it means resposibility of outside of this class: When user wants to save API key and if does not exist asking user to input the API key with stdin might be a good idea. But this is not the responsibility of this class. This class will only return API key exists or not and base on that, outside of this class will ask user to input the API key or raise error or whatever it needs to do. """ # Singleton _initialized = False # TODO: system setting per user [Candidate] ... def __new__(cls): if not hasattr(cls, 'instance'): cls.instance = super(UserSetting, cls).__new__(cls) return cls.instance def __init__(self): # when the class is initialized, this is called everytime # regardless of the singleton. So adding the flag to check if self._initialized: return # create the user setting path os.makedirs(f"{SystemSetting().CACHE_DIR}/.cache/dataverse/setting", exist_ok=True) self.user_setting_path = os.path.join( SystemSetting().CACHE_DIR, ".cache/dataverse/setting/user_setting.json" ) # load the user setting, if not exist, empty dict will be assigned self.user_setting = self.load(self.user_setting_path) self._initialized = True def reset(self): """ reset the setting """ self.user_setting = {} self.sync_file() def sync_file(self): """ sync (class -> file) """ with open(self.user_setting_path, "w") as f: json.dump(self.user_setting, f, indent=4) def sync_class(self): """ sync (file -> class) """ # sync the file to make sure the dict is up-to-date self.user_setting = self.load(self.user_setting_path) def load(self, path): """ Load the user setting file """ # check if user setting file exists if not os.path.exists(path): return {} # read the file with open(path, "r") as f: json_file = json.load(f) return json_file def 
check_naming_convention(self, key): """ 1. only CAPITALIZED format - e.g. GITHUB_API (O) - e.g. github_api (X) 2. only alphanumeric and underscore - e.g. GITHUB_API2 (O) - e.g. github-api (X) - e.g. GITHUB_@API (X) 3. only one underscore between words - e.g. GITHUB__API (X) 4. no underscore at the start/end of the key - e.g. _GITHUB_API (X) - e.g. GITHUB_API_ (X) """ # 1. only CAPITALIZED format if key != key.upper(): raise ValueError(f"key [ {key} ] is not in Capitalized format") # 2. only alphanumeric and underscore for char in key: if not char.isalnum() and char != "_": raise ValueError(f"key [ {key} ] should only contains alphanumeric and underscore") # 3. only one underscore between words if "_" in key: # check if there is only one underscore divided_keys = key.split("_") if "" in divided_keys: raise ValueError(f"key [ {key} ] contains more than one underscore") # 4. no underscore at the start/end of the key if key.startswith("_") or key.endswith("_"): raise ValueError(f"key [ {key} ] contains underscore at the start/end of the key") def get(self, key): """ """ self.sync_class() if key not in self.user_setting: raise KeyError(f"key [ {key} ] does not exist in USER setting") return self.user_setting[key] def set(self, key, value): """ """ self.check_naming_convention(key) self.user_setting[key] = value self.sync_file() # Support dot-like access, e.g. setting.GITHUB_API def __getattr__(self, key): if key in [ "_initialized", "user_setting", "user_setting_path" ]: return super().__getattr__(key) else: return self.get(key) def __setattr__(self, key, value): if key in [ "_initialized", "user_setting", "user_setting_path" ]: super().__setattr__(key, value) else: self.set(key, value) # Support dict-like access, e.g. 
setting["GITHUB_API"] def __getitem__(self, key): return self.get(key) def __setitem__(self, key, value): self.set(key, value) def delete(self, key): """ """ if key in self.user_setting: self.user_setting.pop(key, None) self.sync_file() else: raise KeyError(f"key [ {key} ] does not exist in USER setting") def list(self): """ List all settings """ self.sync_class() print(self.user_setting) def __repr__(self): self.sync_class() return json.dumps(self.user_setting, indent=4) def __str__(self): self.sync_class() return json.dumps(self.user_setting, indent=4) ================================================ FILE: docs/Makefile ================================================ # Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile html html: @echo "Setting environment variable for the build process..." @export DATAVERSE_BUILD_DOC=true && \ $(SPHINXBUILD) -b html "$(SOURCEDIR)" "$(BUILDDIR)/html" $(SPHINXOPTS) $(O) && \ echo "Build is finished. Cleaning up environment variable." && \ unset DATAVERSE_BUILD_DOC # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ================================================ FILE: docs/make.bat ================================================ @ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.https://www.sphinx-doc.org/ exit /b 1 ) if "%1" == "" goto help %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd ================================================ FILE: docs/source/citation.rst ================================================ =================== Citation =================== If you want to cite our *Dataverse* project, feel free to use the following bibtex:: @misc{dataverse, title = {Dataverse}, author = {Hyunbyung Park, Sukyung Lee, Gyoungjin Gim, Yungi Kim, Dahyun Kim, Chanjun Park}, year = {2024}, publisher = {GitHub, Upstage AI}, howpublished = {\url{https://github.com/UpstageAI/dataverse}}, } ================================================ FILE: docs/source/conf.py ================================================ # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- import inspect # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# import os import sys import sphinx_pdj_theme from sphinx.application import Sphinx sys.path.insert(0, os.path.abspath("../..")) # -- Project information ----------------------------------------------------- project = "dataverse" copyright = "2024, Upstage AI" author = "Upstage AI" # The full version, including alpha/beta/rc tags release = "1.0.4" # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.todo", "sphinx.ext.napoleon", "sphinx.ext.autosummary", "sphinx.ext.githubpages", ] todo_include_todos = True napoleon_google_docstring = True napoleon_numpy_docstring = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_permalinks_icon = "<span>#</span>" html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["../images/"] # -- Handle register_etl decorator ------------------------------------------------- sys.path.insert(0, os.path.abspath("../../dataverse")) def process_signature( app: Sphinx, what: str, name: str, obj, options, signature, return_annotation ): if what == "function" and hasattr(obj, "run"): original_func = obj.run.__wrapped__ new_signature = inspect.signature(original_func) parameters = list(new_signature.parameters.values()) new_signature = new_signature.replace( parameters=[ inspect.Parameter("self", inspect.Parameter.POSITIONAL_OR_KEYWORD) ] + parameters ) return str(new_signature), return_annotation return signature, return_annotation def skip_undoc_members(app, what, name, obj, skip, options): if inspect.isclass(obj) and not hasattr(obj, "__is_etl__"): return None return True def setup(app): app.connect("autodoc-process-signature", process_signature) app.connect("autodoc-skip-member", skip_undoc_members) ================================================ FILE: docs/source/config/config.interface.rst ================================================ config ======================== .. automodule:: config.interface.Config :members: :undoc-members: :show-inheritance: .. automethod:: config.interface.Config.load .. automethod:: config.interface.Config.save .. automethod:: config.interface.Config.default .. automethod:: config.interface.Config.set_default ================================================ FILE: docs/source/etl/etl.bias.rst ================================================ etl.bias ================ Reducing skewed or prejudiced data, with a particular emphasis on data that reinforces stereotypes of LLMs. .. 
automodule:: etl.bias :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/source/etl/etl.cleaning.rst ================================================ etl.cleaning ==================== Removing irrelevant, redun-dant, or noisy information from the data, such as stop words or special characters. etl.cleaning.char module ------------------------ .. automodule:: etl.cleaning.char :members: :undoc-members: :show-inheritance: .. autofunction:: etl.cleaning.char.cleaning___char___remove_accent .. autofunction:: etl.cleaning.char.cleaning___char___normalize_whitespace .. autofunction:: etl.cleaning.char.cleaning___char___remove_unprintable etl.cleaning.document module ---------------------------- .. automodule:: etl.cleaning.document :members: :undoc-members: :show-inheritance: .. autofunction:: etl.cleaning.document.cleaning___document___split_by_word etl.cleaning.html module ------------------------ .. automodule:: etl.cleaning.html :members: :undoc-members: :show-inheritance: .. autofunction:: etl.cleaning.html.cleaning___html___extract_plain_text etl.cleaning.korean module -------------------------- .. automodule:: etl.cleaning.korean :members: :undoc-members: :show-inheritance: .. autofunction:: etl.cleaning.korean.cleaning___korean___filter_by_ratio .. autofunction:: etl.cleaning.korean.cleaning___korean___reduce_emoticon etl.cleaning.length module -------------------------- .. automodule:: etl.cleaning.length :members: :undoc-members: :show-inheritance: .. autofunction:: etl.cleaning.length.cleaning___length___char_len_filter .. autofunction:: etl.cleaning.length.cleaning___length___word_len_filter etl.cleaning.number module -------------------------- .. automodule:: etl.cleaning.number :members: :undoc-members: :show-inheritance: .. autofunction:: etl.cleaning.number.cleaning___number___normalize etl.cleaning.table module ------------------------- .. 
automodule:: etl.cleaning.table :members: :undoc-members: :show-inheritance: .. autofunction:: etl.cleaning.table.cleaning___table___merge_col_vertical etl.cleaning.unicode module --------------------------- .. automodule:: etl.cleaning.unicode :members: :undoc-members: :show-inheritance: .. autofunction:: etl.cleaning.unicode.cleaning___unicode___remove_punct .. autofunction:: etl.cleaning.unicode.cleaning___unicode___replace_punct .. autofunction:: etl.cleaning.unicode.cleaning___unicode___normalize ================================================ FILE: docs/source/etl/etl.data_ingestion.rst ================================================ etl.data\_ingestion =========================== Facilitating the loading of data from various sources (e.g., data in Huggingface Hub, and parquet/csv/arrow format data in local storage) into a preferred format. etl.data\_ingestion.arrow module -------------------------------- .. automodule:: etl.data_ingestion.arrow :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_ingestion.arrow.data_ingestion___arrow___hf2raw etl.data\_ingestion.common\_crawl module ---------------------------------------- .. automodule:: etl.data_ingestion.common_crawl :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_ingestion.common_crawl.data_ingestion___common_crawl___wet2raw .. autofunction:: etl.data_ingestion.common_crawl.data_ingestion___common_crawl___dump2raw .. autofunction:: etl.data_ingestion.common_crawl.data_ingestion___common_crawl___raw2ufl etl.data\_ingestion.csv module ------------------------------ .. automodule:: etl.data_ingestion.csv :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_ingestion.csv.data_ingestion___csv___csv2raw etl.data\_ingestion.cultura\_x module ------------------------------------- .. automodule:: etl.data_ingestion.cultura_x :members: :undoc-members: :show-inheritance: .. 
autofunction:: etl.data_ingestion.cultura_x.data_ingestion___cultura_x___raw2ufl etl.data\_ingestion.huggingface module -------------------------------------- .. automodule:: etl.data_ingestion.huggingface :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_ingestion.huggingface.data_ingestion___huggingface___hf2raw etl.data\_ingestion.parquet module ---------------------------------- .. automodule:: etl.data_ingestion.parquet :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_ingestion.parquet.data_ingestion___parquet___pq2raw etl.data\_ingestion.red\_pajama module -------------------------------------- .. automodule:: etl.data_ingestion.red_pajama :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_ingestion.red_pajama.data_ingestion___red_pajama___parquet2ufl .. autofunction:: etl.data_ingestion.red_pajama.data_ingestion___red_pajama___hf2ufl .. autofunction:: etl.data_ingestion.red_pajama.data_ingestion___red_pajama___hf2raw .. autofunction:: etl.data_ingestion.red_pajama.data_ingestion___red_pajama___raw2ufl_templatev1 etl.data\_ingestion.slim\_pajama module --------------------------------------- .. automodule:: etl.data_ingestion.slim_pajama :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_ingestion.slim_pajama.data_ingestion___slim_pajama___parquet2ufl .. autofunction:: etl.data_ingestion.slim_pajama.data_ingestion___slim_pajama___hf2ufl etl.data\_ingestion.test module ------------------------------- .. automodule:: etl.data_ingestion.test :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_ingestion.test.data_ingestion___test___generate_fake_ufl ================================================ FILE: docs/source/etl/etl.data_save.rst ================================================ etl.data\_save ====================== Persisting the processed data into a preferred destination, such as a data lake or database. 
etl.data\_save.aws module ------------------------- .. automodule:: etl.data_save.aws :members: :undoc-members: :show-inheritance: etl.data\_save.huggingface module --------------------------------- .. automodule:: etl.data_save.huggingface :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_save.huggingface.data_save___huggingface___ufl2hf_hub .. autofunction:: etl.data_save.huggingface.data_save___huggingface___ufl2hf .. autofunction:: etl.data_save.huggingface.data_save___huggingface___ufl2hf_obj etl.data\_save.parquet module ----------------------------- .. automodule:: etl.data_save.parquet :members: :undoc-members: :show-inheritance: .. autofunction:: etl.data_save.parquet.data_save___parquet___ufl2parquet ================================================ FILE: docs/source/etl/etl.decontamination.rst ================================================ etl.decontamination =========================== Identifying and removing contaminated data such as benchmark datasets. .. automodule:: etl.decontamination :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/source/etl/etl.deduplication.rst ================================================ etl.deduplication ========================= Eliminating duplicated data on dataset by dataset basis or globally across multiple datasets. etl.deduplication.common\_crawl module -------------------------------------- .. automodule:: etl.deduplication.common_crawl :members: :undoc-members: :show-inheritance: .. autofunction:: etl.deduplication.common_crawl.deduplication___common_crawl___exact_line etl.deduplication.exact module ------------------------------ .. automodule:: etl.deduplication.exact :members: :undoc-members: :show-inheritance: .. autofunction:: etl.deduplication.exact.deduplication___exact___column etl.deduplication.minhash module -------------------------------- .. 
automodule:: etl.deduplication.minhash :members: :undoc-members: :show-inheritance: .. autofunction:: etl.deduplication.minhash.deduplication___minhash___lsh_jaccard etl.deduplication.polyglot module --------------------------------- .. automodule:: etl.deduplication.polyglot :members: :undoc-members: :show-inheritance: .. autofunction:: etl.deduplication.polyglot.deduplication___polyglot___minhash ================================================ FILE: docs/source/etl/etl.pii.rst ================================================ etl.pii =============== Ensuring the removal of sensitive information, such as personally identifiable data, from the dataset. etl.pii.card module ------------------- .. automodule:: etl.pii.card :members: :undoc-members: :show-inheritance: .. autofunction:: etl.pii.card.pii___card___replace_card_number etl.pii.nin module ------------------ .. automodule:: etl.pii.nin :members: :undoc-members: :show-inheritance: .. autofunction:: etl.pii.nin.pii___nin___replace_korean_rrn ================================================ FILE: docs/source/etl/etl.pipeline.rst ================================================ etl.pipeline ===================== ETL Interface: user will be interacting with this interface Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license .. autoclass:: etl.pipeline.ETLPipeline :members: .. autofunction:: etl.pipeline.ETLPipeline.status .. autofunction:: etl.pipeline.ETLPipeline.search .. autofunction:: etl.pipeline.ETLPipeline.get .. autofunction:: etl.pipeline.ETLPipeline.setup_spark_conf .. autofunction:: etl.pipeline.ETLPipeline.sample .. autofunction:: etl.pipeline.ETLPipeline.run .. autofunction:: etl.pipeline.ETLPipeline.run_emr ================================================ FILE: docs/source/etl/etl.quality.rst ================================================ etl.quality =================== Improving the quality of data from the perspectives of accuracy, consistency, and reliability for LLMs. 
etl.quality.language module --------------------------- .. automodule:: etl.quality.language :members: :undoc-members: :show-inheritance: .. autofunction:: etl.quality.language.quality___language___fasttext_filter ================================================ FILE: docs/source/etl/etl.registry.rst ================================================ etl.registry ===================== Base class to support the registration of the ETL classes Copyright (c) 2024-present Upstage Co., Ltd. Apache-2.0 license .. autofunction:: etl.registry.auto_register .. autoclass:: etl.registry.ETLStructure :members: :undoc-members: :show-inheritance: .. autoclass:: etl.registry.ETLRegistry :members: .. autofunction:: etl.registry.ETLRegistry.register .. autofunction:: etl.registry.ETLRegistry.search .. autofunction:: etl.registry.ETLRegistry.get .. autofunction:: etl.registry.ETLRegistry.get_all .. autofunction:: etl.registry.ETLRegistry.reset .. autoclass:: etl.registry.ETLAutoRegistry :members: :undoc-members: :show-inheritance: .. autoclass:: etl.registry.BaseETL :members: :undoc-members: :show-inheritance: .. autofunction:: etl.registry.register_etl ================================================ FILE: docs/source/etl/etl.rst ================================================ etl =========== .. toctree:: :maxdepth: 1 etl.bias etl.cleaning etl.data_ingestion etl.data_save etl.decontamination etl.deduplication etl.pii etl.quality etl.toxicity etl.utils etl.pipeline etl.registry ================================================ FILE: docs/source/etl/etl.toxicity.rst ================================================ etl.toxicity ==================== Identifying and eliminating harmful, offensive, or inappropriate content within the data. .. 
automodule:: etl.toxicity :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/source/etl/etl.utils.rst ================================================ etl.utils ================= Providing essential functionalities for data processing, including sampling, logging, and statistical analysis. etl.utils.log module -------------------- .. automodule:: etl.utils.log :members: :undoc-members: :show-inheritance: .. autofunction:: etl.utils.log.utils___log___count etl.utils.sampling module ------------------------- .. automodule:: etl.utils.sampling :members: :undoc-members: :show-inheritance: .. autofunction:: etl.utils.sampling.utils___sampling___random etl.utils.statistics module --------------------------- .. automodule:: etl.utils.statistics :members: :undoc-members: :show-inheritance: .. autofunction:: etl.utils.statistics.utils___statistics___korean_nouns ================================================ FILE: docs/source/index.rst ================================================ .. dataverse documentation master file, created by sphinx-quickstart on Thu Feb 29 19:54:35 2024. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. =================== Dataverse =================== .. image:: ../images/dataverse_logo-color.png Dataverse is a freely-accessible open-source project that supports your ETL pipeline with Python. We offer a simple, standardized and user-friendly solution for data processing and management, catering to the needs of data scientists, analysts, and developers in LLM era. Even though you don't know much about Spark, you can use it easily via dataverse. With Dataverse, you are empowered to -------------------------------------- - utilize a range of preprocessing functions without the need to install multiple libraries. - create high-quality data for analysis and training of Large Language Models (LLM). 
- leverage Spark with ease, regardless of your expertise level. - facilitate smoother collaboration among users with varying degress of Spark proficiency. - enjoy freedom from the limitations of local environments by harnessing the capabilities of AWS EMR. Architecture of Dataverse -------------------------------------- .. image:: ../images/dataverse_system_architecture_white.jpeg Key Features of Dataverse -------------------------------------- - **Block-Based**: In Dataverse, a `block` means a `registered ETL function` which is running on Spark. You can build Spark code like putting together puzzle pieces. You can easily add, take away, or re-arrange pieces to get the results you want via configure. - **Configure-Based**: All the setups for Spark and steps of block can be defined with configure. You don't need to know all the code. Just set up the options, and you're good to go. - **Extensible**: It's designed to meet your specific demands, allowing for custom features that fit perfectly with your project. If you want to know more about Dataverse, please checkout our `docs <https://data-verse.gitbook.io/docs/>`__. .. toctree:: :maxdepth: 2 :hidden: :caption: Getting Started installation quickstart citation .. toctree:: :maxdepth: 5 :hidden: :caption: Documentation etl/etl config/config.interface Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ================================================ FILE: docs/source/installation.rst ================================================ =================================== Installation =================================== Dataverse can be installed using pip --------------------------------------- .. code-block:: python pip install dataverse In order to use *Dataverse*, there are prerequisites you need to have: Python, Spark and Java. In this `link <https://data-verse.gitbook.io/docs/lets-start/installation>`__, you can find guidelines for installing Apache Spark and JDK. 
Dataverse supports AWS S3 and EMR ------------------------------------ We are providing step by step guides via `link <https://data-verse.gitbook.io/docs/lets-start/aws-setting-guides>`__ to set up AWS S3 and EMR on *Dataverse*. ================================================ FILE: docs/source/quickstart.rst ================================================ =================== Quickstart =================== Various and more detailed tutorials are `here <https://github.com/UpstageAI/dataverse/tree/main/examples>`__. - `add_new_etl_process.ipynb <https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_04_add_new_etl_process.ipynb>`__ : If you want to use your custom function, you have to register the function on Dataverse. This will guide you from register to apply it on pipeline. - `test_etl_process.ipynb <https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_05_test_etl_process.ipynb>`__ : When you want to get test(sample) data to quickly test your ETL process, or need data from a certain point to test your ETL process. - `scaleout_with_EMR.ipynb <https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_06_scaleout_with_EMR.ipynb>`__ : For people who want to run their pipeline on EMR cluster. 1. Set your ETL process as config. `````````````````````````````````` .. code-block:: python from omegaconf import OmegaConf ETL_config = OmegaConf.create({ # Set up Spark 'spark': { 'appname': 'ETL', 'driver': {'memory': '4g'}, }, 'etl': [ { # Extract; You can use HuggingFace datset from hub directly! 
'name': 'data_ingestion___huggingface___hf2raw', 'args': {'name_or_path': ['ai2_arc', 'ARC-Challenge']} }, { # Reduce dataset scale 'name': 'utils___sampling___random', 'args': {'sample_n_or_frac': 0.5} }, { # Transform; deduplicate data via minhash 'name': 'deduplication___minhash___lsh_jaccard', 'args': {'threshold': 0.1, 'ngram_size': 5, 'subset': 'question'} }, { # Load; Save the data 'name': 'data_save___parquet___ufl2parquet', 'args': {'save_path': './guideline/etl/sample/quickstart.parquet'} } ] }) Above code block is an example of an ETL process in *Dataverse*. In *Dataverse*, the available registered ETL functions are referred to as ``blocks``, and this example is comprised of four blocks. You can freely combine these blocks using config to create the ETL processes for your needs. The list of available functions and args of them can be found in the `API Reference <https://data-verse.readthedocs.io/en/latest/>`__. Each functions 'args' should be added in dictionary format. 2. Run ETLpipeline. ``````````````````` .. code-block:: python from dataverse.etl import ETLPipeline etl_pipeline = ETLPipeline() spark, dataset = etl_pipeline.run(config=ETL_config, verbose=True) ETLPipeline is an object designed to manage the ETL processes. By inserting ``ETL_config`` which is defined in the previous step into ETLpipeline object and calling the ``run`` method, stacked ETL blocks will execute in the order they were stacked. 3. Result file is saved on the save_path ``````````````````````````````````````````` .. code-block:: python import pandas as pd pd.read_parquet('./guideline/etl/sample/quickstart.parquet') As the example gave ``save_path`` argument to the last block of ``ETL_config``, data passed through the process will be saved on the given path. 
================================================ FILE: docs/source/requirements.txt ================================================ sphinx sphinx-pdj-theme sphinx-rtd-theme requests numpy pandas fasttext-wheel omegaconf datasets pyspark scipy trafilatura html2text faker boto3 pre-commit==3.6.0 botocore rsa s3transfer isort pytest ================================================ FILE: examples/README.md ================================================ # 🌍 Examples > This is an example collection for `dataverse`. We will talk about the basic usage of `dataverse`, know-how, and how to use it in your project. ### 🙋 I'm very new to Dataverse Introduces very basic, but core steps to use Dataverse. - [ETL_01_how_to_run.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_01_how_to_run.ipynb) - [ETL_02_one_cycle.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_02_one_cycle.ipynb) ### 🙋 I want to use my custom function If you want to use your custom function, you have to register the function on Dataverse. These will guide you from registering the function to applying it on a pipeline. - [ETL_03_create_new_etl_process.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_03_create_new_etl_process.ipynb) - [ETL_04_add_new_etl_process.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_04_add_new_etl_process.ipynb) ### 🙋 I need to test my ETL process with samples When you want to get test(sample) data to quickly test your ETL process, or need data from a certain point to test your ETL process - [ETL_05_test_etl_process.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_05_test_etl_process.ipynb) ### 🙋 I want to run it on EMR cluster Check AWS S3 Support for settings - [ETL_06_scaleout_with_EMR.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/ETL_06_scaleout_with_EMR.ipynb) ### 🙋 Is there any real-world dataset to use Dataverse? Shows how to use common crawl data. 
- [EX_use_common_crawl_data.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/EX_use_common_crawl_data.ipynb) ### 🙋 I want to use Pyspark UI Helps you to use Pyspark UI to monitor the spark job in a Docker environment. - [EX_use_pyspark_ui.ipynb](https://github.com/UpstageAI/dataverse/blob/main/examples/etl/EX_use_pyspark_ui.ipynb) ================================================ FILE: examples/etl/ETL_01_how_to_run.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# ETL how to run?\n", "> Here we will talk about how to run ETL. There are 2 steps to run ETL.\n", "\n", "1. prepare config\n", "2. put config to ETLPipeline" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 1. prepare config" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import os\n", "from pathlib import Path\n", "from dataverse.config import Config \n", "from omegaconf import OmegaConf" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Option 1: When you cloned the Dataverse repository\n", "- This method loads the config file from the directory based on the Dataverse repository.\n", "- If you haven't cloned the repository, please follow option 2." 
] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# E = Extract, T = Transform, L = Load\n", "main_path = Path(os.path.abspath('../..'))\n", "E_path = main_path / \"./dataverse/config/etl/sample/data_ingestion___sampling.yaml\"\n", "T_path = main_path / \"./dataverse/config/etl/sample/data_preprocess___dedup.yaml\"\n", "L_path = main_path / \"./dataverse/config/etl/sample/data_save___hf_obj.yaml\"\n", "\n", "E_config = Config.load(E_path)\n", "T_config = Config.load(T_path)\n", "L_config = Config.load(L_path)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Option 2: When you HAVEN'T Cloned the Dataverse Repository\n", "- With this method, we define each E, T, L config in the shell.\n", "- These configs are exactly the same as each file mentioned above." ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "E_config = OmegaConf.create({\n", " 'spark': { \n", " 'appname': 'dataverse_etl_sample',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " { \n", " 'name': 'data_ingestion___test___generate_fake_ufl', \n", " },\n", " {\n", " 'name': 'utils___sampling___random',\n", " 'args': {'sample_n_or_frac': 0.1}\n", " },\n", " {\n", " 'name': 'data_save___parquet___ufl2parquet',\n", " 'args': {'save_path': \"./sample/sample_ufl.parquet\"}\n", " },\n", " ]\n", " })\n", "\n", "T_config = OmegaConf.create({\n", " 'spark': { \n", " 'appname': 'dataverse_etl_sample',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " { \n", " 'name': 'data_ingestion___parquet___pq2raw', \n", " 'args': {'path': \"./sample/sample_ufl.parquet\"}\n", " },\n", " {\n", " 'name': 'deduplication___minhash___lsh_jaccard',\n", " },\n", " {\n", " 'name': 'data_save___parquet___ufl2parquet',\n", " 'args': {'save_path': \"./sample/preprocess_ufl.parquet\"}\n", " },\n", " ]\n", " })\n", "\n", "L_config = OmegaConf.create({\n", " 'spark': { \n", " 'appname': 
'dataverse_etl_sample',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " { \n", " 'name': 'data_ingestion___parquet___pq2raw', \n", " 'args': {'path': './sample/preprocess_ufl.parquet'}\n", " },\n", " {\n", " 'name': 'data_save___huggingface___ufl2hf_obj',\n", " },\n", " ]\n", " })" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Extract Config\n", "\n", "- load fake generation UFL data\n", "- sample 10% of total data to reduce the size of dataset\n", "- save to parquet `dataverse/sample/sample_ufl.parquet`" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: dataverse_etl_sample\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___test___generate_fake_ufl\n", "- name: utils___sampling___random\n", " args:\n", " sample_n_or_frac: 0.1\n", "- name: data_save___parquet___ufl2parquet\n", " args:\n", " save_path: ./sample/sample_ufl.parquet\n", "\n" ] } ], "source": [ "print(OmegaConf.to_yaml(E_config))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Transform Config\n", "\n", "- load parquet `./sample/sample_ufl.parquet`\n", "- deduplicate by `text` column, 15-gram minhash jaccard similarity\n", "- save to parquet `./sample/preprocess_ufl.parquet`" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: dataverse_etl_sample\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___parquet___pq2raw\n", " args:\n", " path: ./sample/sample_ufl.parquet\n", "- name: deduplication___minhash___lsh_jaccard\n", "- name: data_save___parquet___ufl2parquet\n", " args:\n", " save_path: ./sample/preprocess_ufl.parquet\n", "\n" ] } ], "source": [ "print(OmegaConf.to_yaml(T_config))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Load Config\n", "\n", "- load parquet 
`./sample/preprocess_ufl.parquet`\n", "- convert to huggingface dataset and return the object" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: dataverse_etl_sample\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___parquet___pq2raw\n", " args:\n", " path: ./sample/preprocess_ufl.parquet\n", "- name: data_save___huggingface___ufl2hf_obj\n", "\n" ] } ], "source": [ "print(OmegaConf.to_yaml(L_config))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 2. put config to ETLPipeline" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "An error occurred (ExpiredToken) when calling the GetCallerIdentity operation: The security token included in the request is expired\n" ] } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[ No AWS Credentials Found] - Failed to set spark conf for S3\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "24/04/15 22:10:33 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable\n", "24/04/15 22:10:33 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "[ No AWS Credentials Found] - Failed to set spark conf for S3\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "24/04/15 22:10:38 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n", " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "[ No AWS Credentials Found] - Failed to set spark conf for S3\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "24/04/15 22:10:45 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n", " \r" ] } ], "source": [ "# raw -> ufl\n", "etl_pipeline.run(E_config)\n", "\n", "# ufl -> dedup -> ufl\n", "etl_pipeline.run(T_config)\n", "\n", "# ufl -> hf_obj\n", "spark, dataset = etl_pipeline.run(L_config)" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Dataset({\n", " features: ['id', 'meta', 'name', 'text'],\n", " num_rows: 14\n", "})" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'id': 'a3715cee-e252-4360-9a15-93a3fcc832fb',\n", " 'meta': '{\"name\": \"Caitlin Hughes\", \"age\": 55, \"address\": \"517 Cassandra Mountains\\\\nJamesberg, NM 13313\", \"job\": \"Orthoptist\"}',\n", " 'name': 'test_fake_ufl',\n", " 'text': 'Necessary miss set choice car hour. Only man interest affect. 
Cover black protect successful president court memory.'}" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset[0]" ] } ], "metadata": { "kernelspec": { "display_name": "llm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: examples/etl/ETL_02_one_cycle.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# ETL one cycle\n", "> Normally ETL is processed by 3 steps, E, T, L :) but we could do it by one cycle, ETL.\n", "\n", "We are going to use the 3 configs from `ETL_how_to_run.ipynb` and merge it to one config file." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 1. prepare config" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import os\n", "from pathlib import Path\n", "from dataverse.config import Config \n", "from omegaconf import OmegaConf\n", "\n", "# E = Extract, T = Transform, L = Load\n", "main_path = Path(os.path.abspath('../..'))\n", "ETL_path = main_path / \"./dataverse/config/etl/sample/ETL___one_cycle.yaml\"\n", "\n", "ETL_config = Config.load(ETL_path)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Wait! If you haven't clone the repository, run the shell script below." 
] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "ETL_config = OmegaConf.create({\n", " 'spark': { \n", " 'appname': 'dataverse_etl_sample',\n", " 'driver': {'memory': '16g'} \n", " },\n", " 'etl': [\n", " {\n", " 'name': 'data_ingestion___test___generate_fake_ufl'\n", " },\n", " {\n", " 'name': 'utils___sampling___random',\n", " 'args': {'sample_n_or_frac': 0.1}\n", " },\n", " {\n", " 'name': 'deduplication___minhash___lsh_jaccard'\n", " },\n", " {\n", " 'name': 'data_save___huggingface___ufl2hf_obj'\n", " }\n", " ]\n", "})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 ETL Config\n", "> One cycle from raw to huggingface dataset\n", "\n", "- load fake generation UFL data\n", "- sample 10% of total data to reduce the size of dataset\n", "- deduplicate by `text` column, 15-gram minhash jaccard similarity\n", "- convert to huggingface dataset and return the object" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: dataverse_etl_sample\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___test___generate_fake_ufl\n", "- name: utils___sampling___random\n", " args:\n", " sample_n_or_frac: 0.1\n", "- name: deduplication___minhash___lsh_jaccard\n", "- name: data_save___huggingface___ufl2hf_obj\n", "\n" ] } ], "source": [ "print(OmegaConf.to_yaml(ETL_config))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 2. 
put config to ETLPipeline" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[ No AWS Credentials Found] - Failed to set spark conf for S3\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "24/04/15 22:26:20 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n", "24/04/15 22:26:20 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n", " \r" ] } ], "source": [ "# raw -> hf_obj\n", "spark, dataset = etl_pipeline.run(ETL_config)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Dataset({\n", " features: ['id', 'meta', 'name', 'text'],\n", " num_rows: 14\n", "})" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'id': '32ff39e5-2a88-45dc-a69e-b59b05f51216',\n", " 'meta': '{\"name\": \"Laura White\", \"age\": 49, \"address\": \"126 Javier Islands Apt. 925\\\\nPort Jasonshire, UT 60978\", \"job\": \"Mining engineer\"}',\n", " 'name': 'test_fake_ufl',\n", " 'text': 'Your whose admit ask herself. Public mission far program tough.\\nEconomic talk few minute. Budget face yeah along difference. 
Evening heart throughout.'}" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset[0]" ] } ], "metadata": { "kernelspec": { "display_name": "llm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: examples/etl/ETL_03_create_new_etl_process.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# ETL create new etl process\n", "> Create your custom ETL process to the ETL pipeline.\n", "\n", "when you want to create your own ETL process, it could be tricky.\n", "here is a simple example to show you where to start to create your own ETL process." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 1. 
Start from ETL Pipeline you wanna add your own ETL process\n", "> simple ETL pipeline to load huggingface dataset" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: ETL\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___huggingface___hf2raw\n", " args:\n", " name_or_path:\n", " - ai2_arc\n", " - ARC-Challenge\n", "- name: data_save___huggingface___ufl2hf_obj\n", "\n" ] } ], "source": [ "from omegaconf import OmegaConf\n", "\n", "# load from dict\n", "ETL_config = OmegaConf.create({\n", " 'spark': {\n", " 'appname': 'ETL',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " {\n", " 'name': 'data_ingestion___huggingface___hf2raw',\n", " 'args': {'name_or_path': ['ai2_arc', 'ARC-Challenge']}\n", " },\n", " {\n", " 'name': 'data_save___huggingface___ufl2hf_obj'\n", " }\n", " ]\n", "})\n", "\n", "print(OmegaConf.to_yaml(ETL_config))" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "23/11/14 19:02:29 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable\n", "23/11/14 19:02:29 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n", "Found cached dataset ai2_arc (/root/.cache/huggingface/datasets/ai2_arc/ARC-Challenge/1.0.0/1569c2591ea2683779581d9fb467203d9aa95543bb9b75dcfde5da92529fd7f6)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "9233872e69a545e5a338bdc9b1154537", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/3 [00:00<?, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "c5a31a78b3e9459fb28f8680b14491a8", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Creating parquet from Arrow format: 0%| | 0/3 [00:00<?, ?ba/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading and preparing dataset spark/-1076059055 to /root/.cache/huggingface/datasets/spark/-1076059055/0.0.0...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Dataset spark downloaded and prepared to /root/.cache/huggingface/datasets/spark/-1076059055/0.0.0. Subsequent calls will reuse this data.\n" ] }, { "data": { "text/plain": [ "Dataset({\n", " features: ['answerKey', 'choices', 'id', 'question'],\n", " num_rows: 2590\n", "})" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "\n", "# raw -> hf_obj\n", "spark, dataset = etl_pipeline.run(ETL_config)\n", "dataset" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 2. 
choose where you wanna add your own ETL process\n", "> remove or comment out the following ETL process from config!" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: ETL\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___huggingface___hf2raw\n", " args:\n", " name_or_path:\n", " - ai2_arc\n", " - ARC-Challenge\n", "\n" ] } ], "source": [ "from omegaconf import OmegaConf\n", "\n", "# load from dict\n", "ETL_config = OmegaConf.create({\n", " 'spark': {\n", " 'appname': 'ETL',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " {\n", " 'name': 'data_ingestion___huggingface___hf2raw',\n", " 'args': {'name_or_path': ['ai2_arc', 'ARC-Challenge']}\n", " },\n", " \n", " # TODO: you want to add your own ETL process from here\n", "\n", " # TODO: if so, you need to add the following ETL process!\n", " # remove or comment out the following ETL process\n", " # {\n", " # 'name': 'data_load___huggingface___ufl2hf_obj'\n", " # }\n", " ]\n", "})\n", "\n", "print(OmegaConf.to_yaml(ETL_config))" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "23/11/14 19:02:42 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Found cached dataset ai2_arc (/root/.cache/huggingface/datasets/ai2_arc/ARC-Challenge/1.0.0/1569c2591ea2683779581d9fb467203d9aa95543bb9b75dcfde5da92529fd7f6)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "1835993218fb488a9cc02bcdef0f49b9", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/3 [00:00<?, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "from dataverse.etl import 
ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "\n", "# raw -> spark, data[rdd, Dataframe]\n", "spark, data = etl_pipeline.run(ETL_config)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", " <div>\n", " <p><b>SparkSession - in-memory</b></p>\n", " \n", " <div>\n", " <p><b>SparkContext</b></p>\n", "\n", " <p><a href=\"http://instance-3730:4040\">Spark UI</a></p>\n", "\n", " <dl>\n", " <dt>Version</dt>\n", " <dd><code>v3.4.1</code></dd>\n", " <dt>Master</dt>\n", " <dd><code>local[10]</code></dd>\n", " <dt>AppName</dt>\n", " <dd><code>ETL</code></dd>\n", " </dl>\n", " </div>\n", " \n", " </div>\n", " " ], "text/plain": [ "<pyspark.sql.session.SparkSession at 0x7fe58d1ede40>" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "spark" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "PythonRDD[13] at RDD at PythonRDD.scala:53" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 3. Check the current process so far!\n", "> use spark to check the current process so far!\n", "- `collect` is heavy so recommend to use `take` instead of `collect`!" 
] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'id': 'Mercury_7029645',\n", " 'question': 'Metal atoms will most likely form ions by the',\n", " 'choices': Row(text=['loss of electrons.', 'loss of protons.', 'gain of electrons.', 'gain of protons.'], label=['A', 'B', 'C', 'D']),\n", " 'answerKey': 'A'},\n", " {'id': 'Mercury_7216598',\n", " 'question': 'Which phrase does not describe asexual reproduction in organisms?',\n", " 'choices': Row(text=['requires two parents', 'little variation in offspring', 'only one type of cell involved', 'duplicates its genetic material'], label=['A', 'B', 'C', 'D']),\n", " 'answerKey': 'A'},\n", " {'id': 'MDSA_2008_5_40',\n", " 'question': 'A student is investigating changes in the states of matter. The student fills a graduated cylinder with 50 milliliters of packed snow. The graduated cylinder has a mass of 50 grams when empty and 95 grams when filled with the snow. The packed snow changes to liquid water when the snow is put in a warm room. Which statement best describes this process?',\n", " 'choices': Row(text=['Cooling causes the snow to melt.', 'Cooling causes the snow to freeze.', 'Heating causes the snow to freeze.', 'Heating causes the snow to melt.'], label=['A', 'B', 'C', 'D']),\n", " 'answerKey': 'D'}]" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "data.take(3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 4. Create your own ETL process\n", "> what do you want to do after all? 
\n", "\n", "Let's say you want to add `filter` process to the ETL pipeline.\n", "- you want to remove `choices` key from the dataset" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "data = data.map(lambda x: {k: v for k, v in x.items() if k != 'choices'})" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'id': 'Mercury_7029645',\n", " 'question': 'Metal atoms will most likely form ions by the',\n", " 'answerKey': 'A'},\n", " {'id': 'Mercury_7216598',\n", " 'question': 'Which phrase does not describe asexual reproduction in organisms?',\n", " 'answerKey': 'A'},\n", " {'id': 'MDSA_2008_5_40',\n", " 'question': 'A student is investigating changes in the states of matter. The student fills a graduated cylinder with 50 milliliters of packed snow. The graduated cylinder has a mass of 50 grams when empty and 95 grams when filled with the snow. The packed snow changes to liquid water when the snow is put in a warm room. Which statement best describes this process?',\n", " 'answerKey': 'D'}]" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "data.take(3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Hey! it's working ;)! `choices` key are removed from the dataset!" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 5. Working? It's time to add to the ETL Registry\n", "> working great? it's time to move to how to add to the ETL Registry!\n", "[ETL_add_new_etl_process.ipynb](https://github.com/UpstageAI/dataverse/blob/main/guideline/etl/ETL_add_new_etl_process.ipynb)\n", "\n", "Check out the guideline from above notebook. 
and for preview here is the function template to add to the ETL Registry.\n", "\n", "```python\n", "# before\n", "data = data.map(lambda x: {k: v for k, v in x.items() if k != 'choices'})\n", "\n", "# after\n", "def your___custom___etl_process(spark, data, *args, **kwargs):\n", " # add your custom process here\n", " # here we are going to simply remove 'choices' key\n", " data = data.map(lambda x: {k: v for k, v in x.items() if k != 'choices'})\n", "\n", " return data\n", "```" ] } ], "metadata": { "kernelspec": { "display_name": "llm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: examples/etl/ETL_04_add_new_etl_process.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# ETL add new etl process\n", "> Add your custom ETL process to the ETL pipeline." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Original ETL Pipeline \n", "> This is simple ETL pipeline to load huggingface dataset" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: ETL\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___huggingface___hf2raw\n", " args:\n", " name_or_path:\n", " - ai2_arc\n", " - ARC-Challenge\n", "- name: data_save___huggingface___ufl2hf_obj\n", "\n" ] } ], "source": [ "from omegaconf import OmegaConf\n", "\n", "# load from dict\n", "ETL_config = OmegaConf.create({\n", " 'spark': {\n", " 'appname': 'ETL',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " {\n", " 'name': 'data_ingestion___huggingface___hf2raw',\n", " 'args': {'name_or_path': ['ai2_arc', 'ARC-Challenge']}\n", " },\n", " {\n", " 'name': 'data_save___huggingface___ufl2hf_obj'\n", " }\n", " ]\n", "})\n", "\n", "print(OmegaConf.to_yaml(ETL_config))" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "23/11/14 19:26:56 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n", "23/11/14 19:26:56 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n", "23/11/14 19:26:56 WARN Utils: Service 'SparkUI' could not bind on port 4040. 
Attempting port 4041.\n", "Found cached dataset ai2_arc (/root/.cache/huggingface/datasets/ai2_arc/ARC-Challenge/1.0.0/1569c2591ea2683779581d9fb467203d9aa95543bb9b75dcfde5da92529fd7f6)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "0aeed70bb9b34721aa5f6e8abf72a85f", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/3 [00:00<?, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading and preparing dataset spark/14056872 to /root/.cache/huggingface/datasets/spark/14056872/0.0.0...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Dataset spark downloaded and prepared to /root/.cache/huggingface/datasets/spark/14056872/0.0.0. Subsequent calls will reuse this data.\n" ] }, { "data": { "text/plain": [ "Dataset({\n", " features: ['answerKey', 'choices', 'id', 'question'],\n", " num_rows: 2590\n", "})" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "\n", "# raw -> hf_obj\n", "spark, dataset = etl_pipeline.run(ETL_config)\n", "dataset" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'answerKey': 'A',\n", " 'choices': {'text': ['loss of electrons.',\n", " 'loss of protons.',\n", " 'gain of electrons.',\n", " 'gain of protons.'],\n", " 'label': ['A', 'B', 'C', 'D']},\n", " 'id': 'Mercury_7029645',\n", " 'question': 'Metal atoms will most likely form ions by the'}" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset[0]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Add Custom ETL Process\n", "\n", "1. create your custom ETL process\n", "2. 
check ETL process is registered\n", "3. wrap it with `register_etl` decorator\n", "4. add your custom ETL process to the ETL config\n", "5. run the ETL pipeline\n", "\n", "Here you are going to make a simple " ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "from dataverse.etl import register_etl" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 1. create your custom ETL process\n", "\n", "- naming convention is `cate___sub-cate___name`\n", " - e.g. `huggingface___dataset___load_dataset`\n", "- for input because we are using huggingface dataset `List[Dict]` format will be inserted\n", "\n", "```python\n", "# ai2_arc format\n", "[\n", " {\n", " 'id': ...,\n", " 'choices': ...,\n", " 'question': ...,\n", " 'answerKey': ...,\n", " },\n", " {...},\n", " ...\n", "]\n", "```\n", "\n", "Make a spark process assuming `List[Dict]` is given. Here we are simply going to remove `choices` key from each data point" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "def your___custom___etl_process(spark, data, *args, **kwargs):\n", " # add your custom process here\n", " # here we are going to simply remove 'choices' key\n", " data = data.map(lambda x: {k: v for k, v in x.items() if k != 'choices'})\n", "\n", " return data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 2. check ETL process is registered\n", "\n", "ETL Pipeline only runs registered ETL process" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "An error occurred (InvalidClientTokenId) when calling the GetCallerIdentity operation: The security token included in the request is invalid\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/root/anaconda3/envs/dataverse/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] }, { "data": { "text/plain": [ "==================================================\n", "Total [ 43 ]\n", "==================================================\n", "data_ingestion [ 16 ]\n", "deduplication [ 4 ]\n", "cleaning [ 13 ]\n", "pii [ 2 ]\n", "quality [ 1 ]\n", "data_load [ 4 ]\n", "utils [ 3 ]" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLRegistry \n", "\n", "# we can see our custom is not registered yet\n", "ETLRegistry()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 3. wrap it with `register_etl` decorator\n", "\n", "How to register your custom ETL process?\n", "Simply wrap it with `register_etl` decorator\n", "\n", "```python\n", "@register_etl\n", "def your_custom_etl_process():\n", " ...\n", "```" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "@register_etl\n", "def your___custom___etl_process(spark, data, *args, **kwargs):\n", " # remove 'choices' key from each data point\n", " data = data.map(lambda x: {k: v for k, v in x.items() if k != 'choices'})\n", "\n", " return data" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "==================================================\n", "Total [ 44 ]\n", "==================================================\n", "data_ingestion [ 16 ]\n", "deduplication [ 4 ]\n", "cleaning [ 13 ]\n", "pii [ 2 ]\n", "quality [ 1 ]\n", "data_load [ 4 ]\n", "utils [ 3 ]\n", "your [ 1 ]" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# you will see your custom etl is registered\n", "ETLRegistry()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 4. 
add your custom ETL process to the ETL config\n" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: ETL\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___huggingface___hf2raw\n", " args:\n", " name_or_path:\n", " - ai2_arc\n", " - ARC-Challenge\n", "- name: your___custom___etl_process\n", "- name: data_save___huggingface___ufl2hf_obj\n", "\n" ] } ], "source": [ "from omegaconf import OmegaConf\n", "\n", "# load from dict\n", "ETL_config = OmegaConf.create({\n", " 'spark': {\n", " 'appname': 'ETL',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " {\n", " 'name': 'data_ingestion___huggingface___hf2raw',\n", " 'args': {'name_or_path': ['ai2_arc', 'ARC-Challenge']}\n", " },\n", "\n", " # ======== add your custom etl here ========\n", " {\n", " 'name': 'your___custom___etl_process'\n", " },\n", " # ==========================================\n", "\n", " {\n", " 'name': 'data_save___huggingface___ufl2hf_obj'\n", " }\n", " ]\n", "})\n", "\n", "print(OmegaConf.to_yaml(ETL_config))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 5. run the ETL pipeline\n", "\n", "You can check that ETL process you added customly works great and `choices` are removed." ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "23/11/14 19:27:13 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n", "23/11/14 19:27:13 WARN Utils: Service 'SparkUI' could not bind on port 4040. 
Attempting port 4041.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Found cached dataset ai2_arc (/root/.cache/huggingface/datasets/ai2_arc/ARC-Challenge/1.0.0/1569c2591ea2683779581d9fb467203d9aa95543bb9b75dcfde5da92529fd7f6)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "7ef3d804674d408ba6696c00e6e58bd1", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/3 [00:00<?, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading and preparing dataset spark/1082445423 to /root/.cache/huggingface/datasets/spark/1082445423/0.0.0...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Dataset spark downloaded and prepared to /root/.cache/huggingface/datasets/spark/1082445423/0.0.0. Subsequent calls will reuse this data.\n" ] }, { "data": { "text/plain": [ "Dataset({\n", " features: ['answerKey', 'id', 'question'],\n", " num_rows: 2590\n", "})" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "\n", "# raw -> hf_obj\n", "spark, dataset = etl_pipeline.run(ETL_config)\n", "dataset" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'answerKey': 'A',\n", " 'id': 'Mercury_7029645',\n", " 'question': 'Metal atoms will most likely form ions by the'}" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset[0]" ] } ], "metadata": { "kernelspec": { "display_name": "llm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": 
"python", "pygments_lexer": "ipython3", "version": "3.10.13" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: examples/etl/ETL_05_test_etl_process.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# ETL test etl process\n", "> when you want to get `test`(sample) data to quickly test your ETL process, or need `data from a certain point` to test your ETL process, you can check how to do it here." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Get `test`(sample) data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 get `test`(sample) data `w/o config`\n", "> when you have created a ETL process and don't wanna set config from the scratch here is a quick way to get the sample data" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "23/11/14 19:37:55 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n", " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "total data # : 100\n", "sample data :\n" ] }, { "data": { "text/plain": [ "[{'id': 'e2ce9284-8691-471b-88e3-ba29a5888fd1',\n", " 'name': 'test_fake_ufl',\n", " 'text': 'Simple toward doctor any. Rich name reality bad family. Gas mind even important stay describe official.\\nThere recognize campaign wind on. 
Drop sport however central read.',\n", " 'meta': '{\"name\": \"Amanda Ross\", \"age\": 60, \"address\": \"302 Rebecca Camp\\\\nPatrickborough, CT 40755\", \"job\": \"Broadcast engineer\"}'}]" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "spark, data = etl_pipeline.sample()\n", "\n", "# default sampling will return 100 `ufl` data\n", "print(f\"total data # : {data.count()}\")\n", "print(f\"sample data :\")\n", "data.take(1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "when you want to increase the sample size do the following\n", "```python\n", "spark, data = etl_pipeline.sample(n=10000)\n", "spark, data = etl_pipeline.sample(10000)\n", "```" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "total data # : 10000\n", "sample data :\n" ] }, { "data": { "text/plain": [ "[{'id': '79081a73-5c82-432d-bf4a-f7de8bf59d12',\n", " 'name': 'test_fake_ufl',\n", " 'text': 'Serious teacher follow they entire between. Far see issue view throughout order field.\\nWant senior sell amount picture. 
Tree cell low edge.',\n", " 'meta': '{\"name\": \"Jack Yoder\", \"age\": 75, \"address\": \"083 Diana Parkway Suite 438\\\\nLake Amberport, AS 76996\", \"job\": \"Haematologist\"}'}]" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "spark, data = etl_pipeline.sample(10000)\n", "print(f\"total data # : {data.count()}\")\n", "print(f\"sample data :\")\n", "data.take(1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 get `test`(sample) data `w/ config`\n", "> this might took some time to get the data but you can choose your own data\n", "- this was also introduced in `ETL_03_create_new_etl_process.ipynb`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Getting sample data `you want`" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: ETL\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___huggingface___hf2raw\n", " args:\n", " name_or_path:\n", " - ai2_arc\n", " - ARC-Challenge\n", "- name: utils___sampling___random\n", "\n" ] } ], "source": [ "from omegaconf import OmegaConf\n", "\n", "# load from dict\n", "ETL_config = OmegaConf.create({\n", " 'spark': {\n", " 'appname': 'ETL',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " {\n", " 'name': 'data_ingestion___huggingface___hf2raw',\n", " 'args': {'name_or_path': ['ai2_arc', 'ARC-Challenge']}\n", " },\n", " {'name': 'utils___sampling___random'}\n", " ]\n", "})\n", "\n", "print(OmegaConf.to_yaml(ETL_config))" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "23/11/14 19:38:01 WARN SparkSession: Using an existing Spark session; only runtime SQL configurations will take effect.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Found cached dataset ai2_arc 
(/root/.cache/huggingface/datasets/ai2_arc/ARC-Challenge/1.0.0/1569c2591ea2683779581d9fb467203d9aa95543bb9b75dcfde5da92529fd7f6)\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "efc259f86fec4f76a1165f661ebf13d2", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/3 [00:00<?, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "total data # : 280\n", "sample data :\n" ] }, { "data": { "text/plain": [ "[{'id': 'Mercury_7029645',\n", " 'question': 'Metal atoms will most likely form ions by the',\n", " 'choices': Row(text=['loss of electrons.', 'loss of protons.', 'gain of electrons.', 'gain of protons.'], label=['A', 'B', 'C', 'D']),\n", " 'answerKey': 'A'}]" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "spark, data = etl_pipeline.run(ETL_config)\n", "print(f\"total data # : {data.count()}\")\n", "print(f\"sample data :\")\n", "data.take(1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Test your ETL process\n", "> its time to test your ETL process with the sample data. define ETL process and run it" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "23/11/14 19:38:06 WARN SparkSession: Using an existing Spark session; only runtime SQL configurations will take effect.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "total data # : 100\n", "sample data :\n" ] }, { "data": { "text/plain": [ "[{'id': 'eec9b075-b786-454c-a398-f69d8cf39739',\n", " 'name': 'test_fake_ufl',\n", " 'text': 'Country toward ago old right.\\nNewspaper hotel although short. Hair actually building.\\nWe build then blue hundred perform wall.',\n", " 'meta': '{\"name\": \"Michael Aguirre\", \"age\": 18, \"address\": \"8324 Jennings Road Apt. 
378\\\\nLatoyahaven, MT 27716\", \"job\": \"Television camera operator\"}'}]" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLPipeline\n", "from dataverse.etl import register_etl\n", "\n", "etl_pipeline = ETLPipeline()\n", "\n", "# get sample data\n", "spark, data = etl_pipeline.sample()\n", "print(f\"total data # : {data.count()}\")\n", "print(f\"sample data :\")\n", "data.take(1)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "@register_etl\n", "def test___your___etl_process(spark, data, *args, **kwargs):\n", " # add your custom process here\n", " # here we are going to simply remove 'id' key\n", " data = data.map(lambda x: {k: v for k, v in x.items() if k != 'id'})\n", "\n", " return data" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'name': 'test_fake_ufl',\n", " 'text': 'Country toward ago old right.\\nNewspaper hotel although short. Hair actually building.\\nWe build then blue hundred perform wall.',\n", " 'meta': '{\"name\": \"Michael Aguirre\", \"age\": 18, \"address\": \"8324 Jennings Road Apt. 378\\\\nLatoyahaven, MT 27716\", \"job\": \"Television camera operator\"}'}]" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# test right away\n", "# - successfully removed `id` key\n", "etl = test___your___etl_process\n", "etl()(spark, data).take(1)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'name': 'test_fake_ufl',\n", " 'text': 'Country toward ago old right.\\nNewspaper hotel although short. Hair actually building.\\nWe build then blue hundred perform wall.',\n", " 'meta': '{\"name\": \"Michael Aguirre\", \"age\": 18, \"address\": \"8324 Jennings Road Apt. 
378\\\\nLatoyahaven, MT 27716\", \"job\": \"Television camera operator\"}'}]" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# test it is registered by calling it from etl_pipeline\n", "# - successfully removed `id` key\n", "etl = etl_pipeline.get('test___your___etl_process')\n", "etl()(spark, data).take(1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Experiments on the data itself\n", "> there is no chosen way to use this `test`(sample) data. you can do whatever you want with it. here are some examples" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'id': 'eec9b075-b786-454c-a398-f69d8cf39739',\n", " 'name': 'test_fake_ufl',\n", " 'text': 'Country toward ago old right.\\nNewspaper hotel although short. Hair actually building.\\nWe build then blue hundred perform wall.',\n", " 'meta': '{\"name\": \"Michael Aguirre\", \"age\": 18, \"address\": \"8324 Jennings Road Apt. 
378\\\\nLatoyahaven, MT 27716\", \"job\": \"Television camera operator\"}',\n", " 'duck': 'is quarking (physics)'}]" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "data.map(lambda x: {**x, 'duck': 'is quarking (physics)'}).take(1)" ] } ], "metadata": { "kernelspec": { "display_name": "llm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: examples/etl/ETL_06_scaleout_with_EMR.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# ETL scaleout with EMR\n", "> when you have money but don't have enough device to process your data, it's time to use EMR" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Set AWS Credentials\n", "> This notebook assumes that you have already set your AWS credentials in your local machine. 
If not, please follow the steps below to set your AWS credentials.\n", "\n", "```bash\n", "aws configure\n", " - key: <your access key>\n", " - secret: <your secret key>\n", " - region: <your region>\n", "aws configure set aws_session_token <your session token>\n", "```" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "AWS credentials are valid\n" ] } ], "source": [ "from dataverse.utils.api import aws_check_credentials \n", "\n", "# check aws credentials\n", "# NOTE: `True` means credentials are valid\n", "if aws_check_credentials() == True:\n", " print(\"AWS credentials are valid\")\n", "else:\n", " raise Exception(\"AWS credentials are invalid\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Set up Temporary Data & Environment\n", "> Here you don't need to prepare any data. We will create a temporary data and set temporary environment for you." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Create Temporary Folder at Local & AWS S3" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import tempfile\n", "import uuid\n", "\n", "from dataverse.utils.api import aws_s3_upload\n", "from dataverse.utils.api import aws_s3_create_bucket\n", "\n", "\n", "# create temp local & s3 path\n", "tmp_folder = tempfile.TemporaryDirectory()\n", "tmp_bucket = uuid.uuid4().hex\n", "\n", "aws_s3_create_bucket(bucket=tmp_bucket)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Create Temporary Data and upload to Local & AWS S3\n", "> Data will be duplicated" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "import os\n", "import pandas as pd\n", "from dataverse.utils.api import aws_s3_upload\n", "\n", "\n", "# create sample data and upload to s3\n", "sample_path = os.path.join(tmp_folder.name, 'duplicate.json')\n", "\n", "# create ufl data that has 
duplication\n", "ufl = [\n", " {'text': \"random text\\nduplication\"},\n", " {'text': \"fixed text\\nduplication\"},\n", " {'text': \"fixed text\\nduplication\\nDUPLICATION\"},\n", "]\n", "df = pd.DataFrame(ufl)\n", "df.to_parquet(sample_path)\n", "\n", "bucket = aws_s3_upload(\n", " bucket=tmp_bucket,\n", " key='duplicate.json',\n", " local_path=sample_path,\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Temporary Dynamic ETL\n", "> To show you that you can add temporal dynamic ETL " ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Writing dynamic_etl.py\n" ] } ], "source": [ "%%writefile dynamic_etl.py\n", "from dataverse.etl import register_etl\n", "from pyspark.rdd import RDD\n", "from pyspark.sql import DataFrame\n", "\n", "\n", "@register_etl\n", "def test___add___one(spark, data, subset='text', *args, **kwargs):\n", " if isinstance(data, DataFrame):\n", " data = data.rdd\n", " data = data.map(lambda row: row.asDict())\n", "\n", " def _add_one(row):\n", " row[subset] = row[subset] + '1'\n", " return row\n", "\n", " data = data.map(_add_one)\n", "\n", " return data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Create Temporary Config\n", "- load parquet from s3\n", "- exact deduplicate by line splitted by newline\n", "- add `1` text at the end of each data `text`\n", "- save as parquet to s3" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Detected Dataverse Bucket: dataverse-dv42-d853ea88-c87d-486f-b3b5-d780203bc262\n", "spark:\n", " master: local[10]\n", " appname: default\n", " driver:\n", " memory: 8G\n", " maxResultSize: 2G\n", " executor:\n", " memory: 1G\n", " local:\n", " dir: /root/.cache/dataverse/tmp\n", " ui:\n", " port: 4040\n", "etl:\n", "- name: data_ingestion___parquet___pq2ufl\n", " args:\n", " path: 
s3a://581f4bedcaf24703b248e73d4ecefabd/duplicate.json\n", " repartition: 1\n", "- name: deduplication___common_crawl___exact_line\n", "- name: test___add___one\n", "- name: data_load___parquet___ufl2parquet\n", " args:\n", " save_path: s3a://581f4bedcaf24703b248e73d4ecefabd/deduplicate.parquet\n", "\n" ] } ], "source": [ "from dataverse.config import Config\n", "from omegaconf import OmegaConf\n", "\n", "load_path = f\"s3a://{tmp_bucket}/duplicate.json\"\n", "save_path = f\"s3a://{tmp_bucket}/deduplicate.parquet\"\n", "\n", "config = Config.default()\n", "config.etl.append({\n", " 'name': 'data_ingestion___parquet___pq2ufl',\n", " 'args': {\n", " 'path': load_path,\n", " 'repartition': 1\n", " }}\n", ")\n", "config.etl.append({'name': 'deduplication___common_crawl___exact_line'})\n", "config.etl.append({'name': 'test___add___one'})\n", "config.etl.append({\n", " 'name': 'data_load___parquet___ufl2parquet',\n", " 'args': {'save_path': save_path}})\n", "\n", "print(OmegaConf.to_yaml(config))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 ETLPipeline with `Local`\n", "> We will test our ETL pipeline with local machine first" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Import `dynamic_etl.py` to add custom ETL" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Detected Dataverse Bucket: dataverse-dv42-d853ea88-c87d-486f-b3b5-d780203bc262\n" ] } ], "source": [ "# you can import before running the etl\n", "import dynamic_etl" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 run ETL Pipeline with Local machine\n", "> as the config specified\n", "\n", "- we will load data from s3\n", "- exact deduplicate by line splitted by newline\n", "- add `1` text at the end of each data `text`\n", "- and save as parquet to s3" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": 
"stream", "text": [ "spark conf is set with [ temporary ] S3 credentials\n", ":: loading settings :: url = jar:file:/data/project/private/ducky/anaconda3/envs/llm/lib/python3.10/site-packages/pyspark/jars/ivy-2.5.1.jar!/org/apache/ivy/core/settings/ivysettings.xml\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Ivy Default Cache set to: /root/.ivy2/cache\n", "The jars for the packages stored in: /root/.ivy2/jars\n", "org.apache.hadoop#hadoop-aws added as a dependency\n", "com.amazonaws#aws-java-sdk-bundle added as a dependency\n", ":: resolving dependencies :: org.apache.spark#spark-submit-parent-bbfd8d7f-e9d3-48d9-b3a0-6ac07189c03d;1.0\n", "\tconfs: [default]\n", "\tfound org.apache.hadoop#hadoop-aws;3.3.4 in central\n", "\tfound org.wildfly.openssl#wildfly-openssl;1.0.7.Final in central\n", "\tfound com.amazonaws#aws-java-sdk-bundle;1.12.592 in central\n", ":: resolution report :: resolve 128ms :: artifacts dl 4ms\n", "\t:: modules in use:\n", "\tcom.amazonaws#aws-java-sdk-bundle;1.12.592 from central in [default]\n", "\torg.apache.hadoop#hadoop-aws;3.3.4 from central in [default]\n", "\torg.wildfly.openssl#wildfly-openssl;1.0.7.Final from central in [default]\n", "\t:: evicted modules:\n", "\tcom.amazonaws#aws-java-sdk-bundle;1.12.262 by [com.amazonaws#aws-java-sdk-bundle;1.12.592] in [default]\n", "\t---------------------------------------------------------------------\n", "\t| | modules || artifacts |\n", "\t| conf | number| search|dwnlded|evicted|| number|dwnlded|\n", "\t---------------------------------------------------------------------\n", "\t| default | 4 | 0 | 0 | 1 || 3 | 0 |\n", "\t---------------------------------------------------------------------\n", ":: retrieving :: org.apache.spark#spark-submit-parent-bbfd8d7f-e9d3-48d9-b3a0-6ac07189c03d\n", "\tconfs: [default]\n", "\t0 artifacts copied, 3 already retrieved (0kB/4ms)\n", "23/12/14 21:39:58 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable\n", "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "23/12/14 21:39:58 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n", "23/12/14 21:40:01 WARN MetricsConfig: Cannot locate configuration: tried hadoop-metrics2-s3a-file-system.properties,hadoop-metrics2.properties\n", "23/12/14 21:40:06 WARN BlockManager: Block rdd_20_0 already exists on this machine; not re-adding it\n", " \r" ] } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "spark, data = etl_pipeline.run(config=config)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 download data from s3 and check the result" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "s3a://581f4bedcaf24703b248e73d4ecefabd/deduplicate.parquet\n" ] } ], "source": [ "# aws s3 path\n", "print(save_path)" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/html": [ "<div>\n", "<style scoped>\n", " .dataframe tbody tr th:only-of-type {\n", " vertical-align: middle;\n", " }\n", "\n", " .dataframe tbody tr th {\n", " vertical-align: top;\n", " }\n", "\n", " .dataframe thead th {\n", " text-align: right;\n", " }\n", "</style>\n", "<table border=\"1\" class=\"dataframe\">\n", " <thead>\n", " <tr style=\"text-align: right;\">\n", " <th></th>\n", " <th>text</th>\n", " </tr>\n", " </thead>\n", " <tbody>\n", " <tr>\n", " <th>0</th>\n", " <td>random text\\nduplication1</td>\n", " </tr>\n", " <tr>\n", " <th>1</th>\n", " <td>fixed text1</td>\n", " </tr>\n", " </tbody>\n", "</table>\n", "</div>" ], "text/plain": [ " text\n", "0 random text\\nduplication1\n", "1 fixed text1" ] }, 
"execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.utils.api import aws_s3_path_parse\n", "from dataverse.utils.api import aws_s3_download\n", "\n", "\n", "bucket, key = aws_s3_path_parse(save_path)\n", "aws_s3_download(\n", " bucket=bucket,\n", " key=key,\n", " local_path=os.path.join(tmp_folder.name, 'deduplicate.parquet'),\n", ")\n", "pd.read_parquet(os.path.join(tmp_folder.name, 'deduplicate.parquet'))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Remove Result at local & AWS S3" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "import shutil\n", "from dataverse.utils.api import aws_s3_delete\n", "\n", "# remove saved deduplicate.parquet\n", "shutil.rmtree(os.path.join(tmp_folder.name, 'deduplicate.parquet'))\n", "aws_s3_delete(bucket=tmp_bucket, key='deduplicate.parquet')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 ETLPipeline with `EMR`\n", "> Works good? Let's scale out with EMR!\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 run ETL Pipeline with EMR Machine\n", "> add `emr=True` to ETL pipeline. that's all! 
Auto handle EMR cluster for you!\n", "\n", "\n", "- set `verbose=True` to see the log of EMR cluster\n", "- return value `data` will be returned as config set by `Dataverse` EMR Manager\n", "\n", "```python\n", "# before - local\n", "spark, data = etl_pipeline(config)\n", "\n", "# after - EMR\n", "spark, config = etl_pipeline(config, emr=True)\n", "```" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "================================================================================\n", "Default instance type is [ c5.xlarge ]\n", "================================================================================\n", " vCPU: 4\n", " Memory: 8192\n", " Price: 0.088100\n", "================================================================================\n", "\n", "[ Dataverse ] step status: COMPLETED. Done.\n", "DependencyViolation occured when terminating EMR cluster. Retrying one more time\n" ] } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "spark, config = etl_pipeline.run(config=config, emr=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from omegaconf import OmegaConf\n", "\n", "print(OmegaConf.to_yaml(config))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 download data from s3 and check the result" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/html": [ "<div>\n", "<style scoped>\n", " .dataframe tbody tr th:only-of-type {\n", " vertical-align: middle;\n", " }\n", "\n", " .dataframe tbody tr th {\n", " vertical-align: top;\n", " }\n", "\n", " .dataframe thead th {\n", " text-align: right;\n", " }\n", "</style>\n", "<table border=\"1\" class=\"dataframe\">\n", " <thead>\n", " <tr style=\"text-align: right;\">\n", " <th></th>\n", " <th>text</th>\n", " </tr>\n", " </thead>\n", " <tbody>\n", " <tr>\n", " 
<th>0</th>\n", " <td>random text\\nduplication1</td>\n", " </tr>\n", " <tr>\n", " <th>1</th>\n", " <td>fixed text1</td>\n", " </tr>\n", " </tbody>\n", "</table>\n", "</div>" ], "text/plain": [ " text\n", "0 random text\\nduplication1\n", "1 fixed text1" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.utils.api import aws_s3_path_parse\n", "from dataverse.utils.api import aws_s3_download\n", "\n", "\n", "bucket, key = aws_s3_path_parse(save_path)\n", "aws_s3_download(\n", " bucket=bucket,\n", " key=key,\n", " local_path=os.path.join(tmp_folder.name, 'deduplicate.parquet'),\n", ")\n", "pd.read_parquet(os.path.join(tmp_folder.name, 'deduplicate.parquet'))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 Remove Result at local & AWS S3" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "import shutil\n", "from dataverse.utils.api import aws_s3_delete\n", "\n", "# remove saved deduplicate.parquet\n", "shutil.rmtree(os.path.join(tmp_folder.name, 'deduplicate.parquet'))\n", "aws_s3_delete(bucket=tmp_bucket, key='deduplicate.parquet')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Set `EMR` custom config\n", "> Wanna customize your EMR cluster? 
Let's do it!\n", "\n", "```python\n", "from dataverse.config import Config\n", "\n", "# if you have your own EMR cluster, you can set your own EMR cluster config\n", "config = Config.default(emr=True)\n", "config.emr.id = 'j-XXXXXXXXXXXXX'(your emr cluster id)\n", "```" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " master: local[10]\n", " appname: default\n", " driver:\n", " memory: 8G\n", " maxResultSize: 2G\n", " executor:\n", " memory: 1G\n", " local:\n", " dir: /root/.cache/dataverse/tmp\n", " ui:\n", " port: 4040\n", "etl:\n", "- name: data_ingestion___parquet___pq2ufl\n", " args:\n", " path: s3a://576768809f8a4181b034ef7921613d41/duplicate.json\n", " repartition: 1\n", "- name: deduplication___common_crawl___exact_line\n", "- name: test___add___one\n", "- name: data_load___parquet___ufl2parquet\n", " args:\n", " save_path: s3a://576768809f8a4181b034ef7921613d41/deduplicate.parquet\n", "emr:\n", " id: null\n", " working_dir: null\n", " name: dataverse_emr\n", " release: emr-6.15.0\n", " idle_timeout: 3600\n", " master_instance:\n", " type: null\n", " core_instance:\n", " type: null\n", " count: 5\n", " task_instance:\n", " type: null\n", " count: 0\n", " auto_generated: null\n", " role:\n", " ec2:\n", " name: null\n", " policy_arns: null\n", " emr:\n", " name: null\n", " policy_arns: null\n", " instance_profile:\n", " name: null\n", " ec2_role: null\n", " vpc:\n", " id: null\n", " subnet:\n", " id: null\n", " public_id: null\n", " private_id: null\n", " public: true\n", " security_group:\n", " id: null\n", " gateway:\n", " id: null\n", " route_table:\n", " id: null\n", " elastic_ip:\n", " id: null\n", " nat_gateway:\n", " id: null\n", "\n" ] } ], "source": [ "from dataverse.config import Config\n", "from omegaconf import OmegaConf\n", "\n", "load_path = f\"s3a://{tmp_bucket}/duplicate.json\"\n", "save_path = f\"s3a://{tmp_bucket}/deduplicate.parquet\"\n", 
"\n", "# TODO: add `emr=True` to get the emr config\n", "# =========================================\n", "config = Config.default(emr=True)\n", "# =========================================\n", "\n", "config.etl.append({\n", " 'name': 'data_ingestion___parquet___pq2ufl',\n", " 'args': {\n", " 'path': load_path,\n", " 'repartition': 1\n", " }}\n", ")\n", "config.etl.append({'name': 'deduplication___common_crawl___exact_line'})\n", "config.etl.append({'name': 'test___add___one'})\n", "config.etl.append({\n", " 'name': 'data_load___parquet___ufl2parquet',\n", " 'args': {'save_path': save_path}})\n", "\n", "# TODO: add `emr=True` to get the emr config\n", "# =========================================\n", "config.emr.core_instance.count = 5\n", "\n", "# TODO: there are more config options for emr\n", "# check `dataverse.config.Config.default`\n", "# =========================================\n", "\n", "print(OmegaConf.to_yaml(config))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "spark, config = etl_pipeline.run(config=config, emr=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 download data from s3 and check the result" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/html": [ "<div>\n", "<style scoped>\n", " .dataframe tbody tr th:only-of-type {\n", " vertical-align: middle;\n", " }\n", "\n", " .dataframe tbody tr th {\n", " vertical-align: top;\n", " }\n", "\n", " .dataframe thead th {\n", " text-align: right;\n", " }\n", "</style>\n", "<table border=\"1\" class=\"dataframe\">\n", " <thead>\n", " <tr style=\"text-align: right;\">\n", " <th></th>\n", " <th>text</th>\n", " </tr>\n", " </thead>\n", " <tbody>\n", " <tr>\n", " <th>0</th>\n", " <td>random text\\nduplication1</td>\n", " </tr>\n", " <tr>\n", " <th>1</th>\n", " <td>fixed text1</td>\n", " </tr>\n", " 
</tbody>\n", "</table>\n", "</div>" ], "text/plain": [ " text\n", "0 random text\\nduplication1\n", "1 fixed text1" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.utils.api import aws_s3_path_parse\n", "from dataverse.utils.api import aws_s3_download\n", "\n", "\n", "bucket, key = aws_s3_path_parse(save_path)\n", "aws_s3_download(\n", " bucket=bucket,\n", " key=key,\n", " local_path=os.path.join(tmp_folder.name, 'deduplicate.parquet'),\n", ")\n", "pd.read_parquet(os.path.join(tmp_folder.name, 'deduplicate.parquet'))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Remove Temporary Data & Environment\n", "> it's time to clean up" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "from dataverse.utils.api import aws_s3_delete\n", "from dataverse.utils.api import aws_s3_delete_bucket\n", "\n", "!rm dynamic_etl.py\n", "\n", "# remove temp folder\n", "tmp_folder.cleanup()\n", "\n", "# remove temp bucket\n", "aws_s3_delete(bucket=tmp_bucket, key='')\n", "aws_s3_delete_bucket(bucket=tmp_bucket)" ] } ], "metadata": { "kernelspec": { "display_name": "llm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: examples/etl/EX_use_common_crawl_data.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Use Common Crawl Data\n", "> How to use common crawl data? There is 2 ways to achieve this" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Dump-ID\n", "> common crawl dump id related to the date of the crawl. 
ex: 2023-23" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: CommonCrawl\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___common_crawl___dump2raw\n", " args:\n", " dump: 2023-23\n", " segment_n: 1\n", "- name: data_ingestion___common_crawl___raw2ufl\n", "- name: cleaning___normalization___number\n", "- name: deduplication___common_crawl___exact_line\n", "- name: quality___language___fasttext_filter\n", " args:\n", " whitelist:\n", " - ko\n", " threshold: 0.5\n", "- name: data_save___huggingface___ufl2hf_obj\n", "\n" ] } ], "source": [ "from omegaconf import OmegaConf\n", "\n", "# load from dict\n", "ETL_config = OmegaConf.create({\n", " 'spark': {\n", " 'appname': 'CommonCrawl',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " {\n", " 'name': 'data_ingestion___common_crawl___dump2raw',\n", " 'args': {\n", " 'dump': \"2023-23\",\n", " 'segment_n': 1,\n", " }\n", " },\n", " {'name': 'data_ingestion___common_crawl___raw2ufl'},\n", " {'name': 'cleaning___normalization___number'},\n", " {'name': 'deduplication___common_crawl___exact_line'},\n", " {\n", " 'name': 'quality___language___fasttext_filter',\n", " 'args': {\n", " 'whitelist': ['ko'],\n", " 'threshold': 0.5,\n", " }\n", " },\n", " {'name': 'data_save___huggingface___ufl2hf_obj'}\n", " ]\n", "})\n", "\n", "print(OmegaConf.to_yaml(ETL_config))" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "23/11/14 22:09:41 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable\n", "23/11/14 22:09:41 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n", " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading and preparing dataset spark/-572665896 to /root/.cache/huggingface/datasets/spark/-572665896/0.0.0...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Dataset spark downloaded and prepared to /root/.cache/huggingface/datasets/spark/-572665896/0.0.0. Subsequent calls will reuse this data.\n" ] }, { "data": { "text/plain": [ "Dataset({\n", " features: ['id', 'meta', 'name', 'text'],\n", " num_rows: 292\n", "})" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "\n", "# raw -> hf_obj\n", "spark, dataset = etl_pipeline.run(ETL_config)\n", "dataset" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'id': '19ee2ac082ef11eeae4262800acfdc4f',\n", " 'meta': '{\"title\": \"\\\\uc640\\\\uae00\\\\uc640\\\\uae00 - \\\\uc7ac\\\\ubbf8\", \"url\": \"http://wagle.isplus.joins.com/app/index.php?mid=wg_fun&page=6\", \"date_download\": \"2023-06-05T00:45:09Z\", \"digest\": \"sha1:UDASCLMI7FRAUR5PKBHJZ6DZSBZPZTFI\", \"length\": 2557, \"nlines\": 45, \"source_domain\": \"wagle.isplus.joins.com\", \"cc_segment\": \"crawl-data/CC-MAIN-2023-23/segments/1685224650409.64/wet/CC-MAIN-20230604225057-20230605015057-00644.warc.wet.gz\"}',\n", " 'name': 'common_crawl',\n", " 'text': \"조인스\\n와글와글 전체 목록\\n조회\\n0000 매경기 재평가되는 맨유짤 더레즈 0000-00-00\\n0000 아스날이 0경기 0승을 한 이유? 구0000너 0000-00-00\\n0000 은근 축구 혼자서 다하는 선수 풋스타 0000-00-00\\n0000 [오피셜] 아스날, 리그 0위로 0라운드 종료 아스날아.. 
0000-00-00\\n0000 [놀람] 놀랄 수 밖에 없는 첼시 선발라인업 케파멘디 0000-00-00\\n0000 [감동] 분데스리가 00번의 과거와 미래 포항항 0000-00-00\\n0000 ???:너네들 재미있어보이네~ 에밀홀딩 0000-00-00\\n0000 [정보]0000년 0회 이상 우승한팀 어우뮌x0 0000-00-00\\n0000 커뮤니티실드에서의 리버풀 해리킼웰 0000-00-00\\n0000 [유머] ?? : 아스날... 생각보다 강팀이잖아..? 금발롱 0000-00-00\\n0000 (감동)??:우....승...뭐라고? 티아구메.. 0000-00-00\\n0000 ????:우승팀이 이정도라니 나설필요가 없겠는걸 '질'? 0년0우승.. 0000-00-00\\n0000 위닝의 저주(?) 베르바턴 0000-00-00\\n0000 [유머] 현시점 최강팀 보이빕 0000-00-00\\n0000 다시보는 바르셀로나 보드진 영입 큰일은바.. 0000-00-00\\n0000 “축구의 신” 뮌헨콜라 0000-00-00\\n0000 라리가 0형제.jpg 헤르니고르 0000-00-00\\n0000 ???: 어이! 바르샤, 한잔 해~! 사비에르 0000-00-00\\n0000 그래도 아직 레바뮌 맞지 ㅋㅋ 킴미희 0000-00-00\\n0000 뮌헨-돌문-맨유 내리갈굼 퓰리식혜 0000-00-00\\n0000 최근 펩과르디올라 챔스 성적ㅋㅋ 펩몬드 0000-00-00\\n0000 ?????: 만나서 반갑다 00000 후안펩시 0000-00-00\\n0000 챔스 아탈란타 상대로 유일하게 클린시트한 키퍼 누구? 안녕카일 0000-00-00\\n0000 PSG VS 아탈란타 네이마르 요약 축신마르 0000-00-00\\n0000 맨유팬의 불타는 행복회로 에덴하자드 0000-00-00\\n0000 최근 경기당 0골씩 넣고 있는 축구선수 썬가드 0000-00-00\\n0000 [유머] 램파드 인터뷰떴다! 램램반장 0000-00-00\\n0000 [감동] ??? : 우...승...뭐라고? 아스날팬임 0000-00-00\\n0000 ???: 야 토트넘! 라이트구너 0000-00-00\\n0000 드디어 완성된 PL 빅0 사네는뮌.. 0000-00-00\\n0000 [유머] 와 아스날 매수ㅋㅋㅋ 김티어니 0000-00-00\\n0000 ???: 하늘은 왜 아스날을 낳고 베베루니 0000-00-00\\n0000 00/00시즌 린가드 유니버스 끝 린가디 0000-00-00\\n0000 0류 맨유 팬카페 맨유더마스 0000-00-00\\n0000 프리미어리그 운명공동체 0팀 AlMacdo 0000-00-00\\n쓰기\"}" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset[0]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 WET folder\n", "> use pre-downloaded WET files\n", "\n", "We are going to use the cache common crawl as we just downloaded while processing dump-id ETL example right before. Time to use it!" 
] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "from dataverse.utils.setting import SystemSetting\n", "from pathlib import Path" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "An error occurred (InvalidClientTokenId) when calling the GetCallerIdentity operation: The security token included in the request is invalid\n" ] } ], "source": [ "wet_path = Path(SystemSetting().CACHE_DIR) / '.cache' / 'dataverse' / 'dataset' / 'common_crawl_2023-23'" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: CommonCrawl\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___common_crawl___wet2raw\n", " args:\n", " wet_path: /root/.cache/dataverse/dataset/common_crawl_2023-23\n", "- name: data_ingestion___common_crawl___raw2ufl\n", "- name: cleaning___normalization___number\n", "- name: deduplication___common_crawl___exact_line\n", "- name: quality___language___fasttext_filter\n", " args:\n", " whitelist:\n", " - ko\n", " threshold: 0.5\n", "- name: data_save___huggingface___ufl2hf_obj\n", "\n" ] } ], "source": [ "from omegaconf import OmegaConf\n", "\n", "# load from dict\n", "ETL_config = OmegaConf.create({\n", " 'spark': {\n", " 'appname': 'CommonCrawl',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " {\n", " 'name': 'data_ingestion___common_crawl___wet2raw',\n", " 'args': {\n", " 'wet_path': str(wet_path),\n", " }\n", " },\n", " {'name': 'data_ingestion___common_crawl___raw2ufl'},\n", " {'name': 'cleaning___normalization___number'},\n", " {'name': 'deduplication___common_crawl___exact_line'},\n", " {\n", " 'name': 'quality___language___fasttext_filter',\n", " 'args': {\n", " 'whitelist': ['ko'],\n", " 'threshold': 0.5,\n", " }\n", " },\n", " {'name': 'data_save___huggingface___ufl2hf_obj'}\n", " ]\n", "})\n", 
"\n", "print(OmegaConf.to_yaml(ETL_config))" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "23/11/14 22:10:11 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading and preparing dataset spark/-1399168669 to /root/.cache/huggingface/datasets/spark/-1399168669/0.0.0...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Dataset spark downloaded and prepared to /root/.cache/huggingface/datasets/spark/-1399168669/0.0.0. Subsequent calls will reuse this data.\n" ] }, { "data": { "text/plain": [ "Dataset({\n", " features: ['id', 'meta', 'name', 'text'],\n", " num_rows: 292\n", "})" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "\n", "# raw -> hf_obj\n", "spark, dataset = etl_pipeline.run(ETL_config)\n", "dataset" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'id': '29551d2082ef11eea9d462800acfdc4f',\n", " 'meta': '{\"title\": \"\\\\uc640\\\\uae00\\\\uc640\\\\uae00 - \\\\uc7ac\\\\ubbf8\", \"url\": \"http://wagle.isplus.joins.com/app/index.php?mid=wg_fun&page=6\", \"date_download\": \"2023-06-05T00:45:09Z\", \"digest\": \"sha1:UDASCLMI7FRAUR5PKBHJZ6DZSBZPZTFI\", \"length\": 2557, \"nlines\": 45, \"source_domain\": \"wagle.isplus.joins.com\", \"cc_segment\": \"/root/.cache/dataverse/dataset/common_crawl_2023-23/CC-MAIN-20230604225057-20230605015057-00644.warc.wet.gz\"}',\n", " 'name': 'common_crawl',\n", " 'text': \"조인스\\n와글와글 전체 목록\\n조회\\n0000 매경기 재평가되는 맨유짤 
더레즈 0000-00-00\\n0000 아스날이 0경기 0승을 한 이유? 구0000너 0000-00-00\\n0000 은근 축구 혼자서 다하는 선수 풋스타 0000-00-00\\n0000 [오피셜] 아스날, 리그 0위로 0라운드 종료 아스날아.. 0000-00-00\\n0000 [놀람] 놀랄 수 밖에 없는 첼시 선발라인업 케파멘디 0000-00-00\\n0000 [감동] 분데스리가 00번의 과거와 미래 포항항 0000-00-00\\n0000 ???:너네들 재미있어보이네~ 에밀홀딩 0000-00-00\\n0000 [정보]0000년 0회 이상 우승한팀 어우뮌x0 0000-00-00\\n0000 커뮤니티실드에서의 리버풀 해리킼웰 0000-00-00\\n0000 [유머] ?? : 아스날... 생각보다 강팀이잖아..? 금발롱 0000-00-00\\n0000 (감동)??:우....승...뭐라고? 티아구메.. 0000-00-00\\n0000 ????:우승팀이 이정도라니 나설필요가 없겠는걸 '질'? 0년0우승.. 0000-00-00\\n0000 위닝의 저주(?) 베르바턴 0000-00-00\\n0000 [유머] 현시점 최강팀 보이빕 0000-00-00\\n0000 다시보는 바르셀로나 보드진 영입 큰일은바.. 0000-00-00\\n0000 “축구의 신” 뮌헨콜라 0000-00-00\\n0000 라리가 0형제.jpg 헤르니고르 0000-00-00\\n0000 ???: 어이! 바르샤, 한잔 해~! 사비에르 0000-00-00\\n0000 그래도 아직 레바뮌 맞지 ㅋㅋ 킴미희 0000-00-00\\n0000 뮌헨-돌문-맨유 내리갈굼 퓰리식혜 0000-00-00\\n0000 최근 펩과르디올라 챔스 성적ㅋㅋ 펩몬드 0000-00-00\\n0000 ?????: 만나서 반갑다 00000 후안펩시 0000-00-00\\n0000 챔스 아탈란타 상대로 유일하게 클린시트한 키퍼 누구? 안녕카일 0000-00-00\\n0000 PSG VS 아탈란타 네이마르 요약 축신마르 0000-00-00\\n0000 맨유팬의 불타는 행복회로 에덴하자드 0000-00-00\\n0000 최근 경기당 0골씩 넣고 있는 축구선수 썬가드 0000-00-00\\n0000 [유머] 램파드 인터뷰떴다! 램램반장 0000-00-00\\n0000 [감동] ??? : 우...승...뭐라고? 아스날팬임 0000-00-00\\n0000 ???: 야 토트넘! 라이트구너 0000-00-00\\n0000 드디어 완성된 PL 빅0 사네는뮌.. 0000-00-00\\n0000 [유머] 와 아스날 매수ㅋㅋㅋ 김티어니 0000-00-00\\n0000 ???: 하늘은 왜 아스날을 낳고 베베루니 0000-00-00\\n0000 00/00시즌 린가드 유니버스 끝 린가디 0000-00-00\\n0000 0류 맨유 팬카페 맨유더마스 0000-00-00\\n0000 프리미어리그 운명공동체 0팀 AlMacdo 0000-00-00\\n쓰기\"}" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset[0]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 WET folder - Add MinhashLSH fuzzy deduplication\n", "> same but more preprocessing! 
\n" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "spark:\n", " appname: CommonCrawl\n", " driver:\n", " memory: 16g\n", "etl:\n", "- name: data_ingestion___common_crawl___wet2raw\n", " args:\n", " wet_path: /root/.cache/dataverse/dataset/common_crawl_2023-23\n", "- name: data_ingestion___common_crawl___raw2ufl\n", "- name: cleaning___normalization___number\n", "- name: deduplication___minhash___lsh_jaccard\n", "- name: deduplication___common_crawl___exact_line\n", "- name: quality___language___fasttext_filter\n", " args:\n", " whitelist:\n", " - ko\n", " threshold: 0.5\n", "- name: data_save___huggingface___ufl2hf_obj\n", "\n" ] } ], "source": [ "from omegaconf import OmegaConf\n", "\n", "# load from dict\n", "ETL_config = OmegaConf.create({\n", " 'spark': {\n", " 'appname': 'CommonCrawl',\n", " 'driver': {'memory': '16g'},\n", " },\n", " 'etl': [\n", " {\n", " 'name': 'data_ingestion___common_crawl___wet2raw',\n", " 'args': {\n", " 'wet_path': str(wet_path),\n", " }\n", " },\n", " {'name': 'data_ingestion___common_crawl___raw2ufl'},\n", " {'name': 'cleaning___normalization___number'},\n", " {'name': 'deduplication___minhash___lsh_jaccard'},\n", " {'name': 'deduplication___common_crawl___exact_line'},\n", " {\n", " 'name': 'quality___language___fasttext_filter',\n", " 'args': {\n", " 'whitelist': ['ko'],\n", " 'threshold': 0.5,\n", " }\n", " },\n", " {'name': 'data_save___huggingface___ufl2hf_obj'}\n", " ]\n", "})\n", "\n", "print(OmegaConf.to_yaml(ETL_config))" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "23/11/14 22:10:34 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": 
"stdout", "output_type": "stream", "text": [ "Downloading and preparing dataset spark/2085970941 to /root/.cache/huggingface/datasets/spark/2085970941/0.0.0...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Dataset spark downloaded and prepared to /root/.cache/huggingface/datasets/spark/2085970941/0.0.0. Subsequent calls will reuse this data.\n" ] }, { "data": { "text/plain": [ "Dataset({\n", " features: ['id', 'meta', 'name', 'text'],\n", " num_rows: 285\n", "})" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "\n", "# raw -> hf_obj\n", "spark, dataset = etl_pipeline.run(ETL_config)\n", "dataset" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'id': '3aa3dddc82ef11ee898d62800acfdc4f',\n", " 'meta': '{\"title\": \"\\\\ub3d9\\\\uc601\\\\uc0c1 | \\\\uc6b0\\\\ub9ac \\\\ud568\\\\uaed8 \\\\ub9cc\\\\ub4e4\\\\uc5b4 \\\\ubd05\\\\uc2dc\\\\ub2e4.(57.\\\\ud478\\\\ucd08\\\\ubcf6\\\\uc74c)\\\\u200b\", \"url\": \"https://dprktoday.com/videos/16055?list=\", \"date_download\": \"2023-06-05T01:01:45Z\", \"digest\": \"sha1:6TKZ4VWGQESC6HVNGQS3ESIE4BR63V25\", \"length\": 4007, \"nlines\": 317, \"source_domain\": \"dprktoday.com\", \"cc_segment\": \"/root/.cache/dataverse/dataset/common_crawl_2023-23/CC-MAIN-20230604225057-20230605015057-00644.warc.wet.gz\"}',\n", " 'name': 'common_crawl',\n", " 'text': '첫페지로\\n날자별열람\\n손전화홈페지열람기\\n조선어 English 中国语 Русский\\n정치\\n경제\\n군사\\n사회문화\\n조국통일\\n관광\\n력사\\n로작\\n기 사\\n동영상\\n사 진\\n음악감상\\n전체\\n혁명활동소식\\n기록영화\\n회고록《세기와 더불어》\\n《조선의 오늘》동영상\\n조선중앙TV\\nU C C\\n국제친선전람관을 찾아서 |\\n국가선물관을 찾아서 |\\n특집 |\\n생활의 랑만과 정서 |\\n미덕의 향기 |\\n인물소개 |\\n예술공연 |\\n아동무대 |\\n조선영화 |\\nTV예술영화 |\\nTV련속소설 |\\nTV련속극 |\\nTV극 |\\nTV기록영화 |\\nTV기록편집물 |\\n사이프로편집물 |\\n만화영화 |\\n인기동영상 |\\n화면취재시간 |\\n민족의 자취를 찾아서 |\\n우리함께 
|\\n조선의 숨결 |\\n이 시각 평양, 그 한토막 |\\n나는 좋아요 |\\n료리백과 |\\n[료리만들기]\\n우리 함께 만들어 봅시다.(00.푸초볶음)\\u200b\\n0 0:00 [0000-00-00]\\n온면\\n돼지고기졸임\\n봄철음식 -달래무우김치, 냉이고추장무침-\\n감자가루군만두\\n닭알료리, 청포채\\n0 0분 [0000-00-00]\\n우리 함께 만들어 봅시다.(000.뜨더국)\\n우리 함께 만들어 봅시다.(000.닭위졸임)\\n우리 함께 만들어 봅시다.(000.미꾸라지풋고추졸임)\\n0 0:0 [0000-00-00]\\n우리 함께 만들어 봅시다.(000.무우채김치)\\n우리 함께 만들어 봅시다.(000.칼제비국)\\n우리 함께 만들어 봅시다.(000.고등어졸임)\\n우리 함께 만들어 봅시다.(000.록두묵채)\\n|\\n감상글(0) |\\n동영상보기 |\\n추천하기\\n료리만들기 000건\\n0분 00초\\n0 [0000-00-00]\\n0분\\n0분 0초\\n←되돌이\\n현대조선을 빛내이신 절세위인들 | 회고록 《세기와 더불어》 | 정치 | 경제 | 군사 | 사회문화 | 조국통일 | 관광 | 력사\\n기사 | 동영상 | 사진 | 음악감상 | 통일신보 | 다매체편집물 | 도서 | 도서련재 | 록음물 | 그림책 | 조선우표 | 조선미술 | 명제품 | 특산료리 | 독자목소리 | 감상글\\n홈페지봉사에 관한 문의\\nCopyright© 0000-0000 《평양모란봉편집사》 All Rights Reserved'}" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset[0]" ] } ], "metadata": { "kernelspec": { "display_name": "llm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: examples/etl/EX_use_pyspark_ui.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Use Pyspark UI\n", "> you can use pyspark UI to monitor the spark job." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 🌌 Using in Docker Environment" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 🌠 when pyspark default port `4040` is not available\n", "> In a Docker environment, to access PySpark's UI, the port PySpark's UI runs on inside the Docker container should be mapped to a certain port on your host machine to make it accessible. 
By default, PySpark's UI runs on port `4040` inside the container. If this port is not available, you can configure PySpark to use a different port." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### pyspark UI attempt with `4040` (default)" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Setting default log level to \"WARN\".\n", "To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n", "23/11/18 07:22:15 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n", "23/11/18 07:22:16 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n", "23/11/18 07:22:18 WARN Utils: Service 'SparkUI' could not bind on port 4040. Attempting port 4041.\n" ] } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "spark, data = etl_pipeline.sample()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "if you access `https://{your_ip_address}:4040` and nothing will be shown" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# stop the spark session after you are done\n", "spark.stop()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### change pyspark UI port to which is `available`\n", "> the point here is to let you know that you can change the port to the port you want\n", "\n", "- here for example, let's assume `30360` port is available" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "23/11/18 07:22:23 WARN SparkConf: Note that spark.local.dir will be overridden by the value set by the cluster manager (via SPARK_LOCAL_DIRS in 
mesos/standalone/kubernetes and LOCAL_DIRS in YARN).\n" ] } ], "source": [ "from dataverse.etl import ETLPipeline\n", "\n", "etl_pipeline = ETLPipeline()\n", "spark, data = etl_pipeline.sample(config={'spark': {'ui': {'port': 30360}}})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "if you access `https://{your_ip_address}:30360` and voila! you can see the pyspark UI" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " \r" ] }, { "data": { "text/plain": [ "100" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# check out the changes in the UI\n", "data.count()" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "# stop the spark session after you are done\n", "spark.stop()" ] } ], "metadata": { "kernelspec": { "display_name": "llm", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: examples/etl/README.md ================================================ # 🗺️ ETL (Extract, Transform, Load) ================================================ FILE: requirements.txt ================================================ requests numpy pandas fasttext-wheel omegaconf pyarrow==14.0.1 datasets pyspark scipy trafilatura html2text faker awscli boto3 pre-commit==3.6.0 botocore rsa s3transfer isort pytest graphframes-latest ================================================ FILE: setup.py ================================================ import os from setuptools import find_packages, setup basedir = os.path.abspath(os.path.dirname(__file__)) requirements_path = os.path.join(basedir, 
"requirements.txt") def get_requirements(): """Get package requirements from a requirements file (ex: requirements.txt).""" with open(requirements_path, "r") as f: return f.read().splitlines() def get_extras_require(): extras_require = { "aws": [ "awscli==1.32.36", "botocore==1.34.36", "rsa==4.7.2", "s3transfer==0.10.0", ], "dev": [ "black==22.12.0", "isort>=5.10.1", "flake8>=4.0.1", "pytest>=7.4.4", "pre-commit==3.6.0", ], } extras_require.update({"all": [i[0] for i in extras_require.values()]}) return extras_require setup( name="dataverse", version="1.0.4", packages=find_packages(), author="Dataverse Team", author_email="dataverse@upstage.ai", description="An open-source simplifies ETL workflow with Python based on Spark", license="Apache License 2.0", include_package_data=True, install_requires=get_requirements(), entry_points={"console_scripts": ["dataverse = dataverse.api.cli:main"]}, )