Repository: airbytehq/quickstarts
Branch: main
Commit: d00a63074425
Files: 753
Total size: 4.4 MB
Directory structure:
gitextract_zp7ng51c/
├── .devcontainer/
│ ├── README.md
│ └── devcontainer.json
├── .gitignore
├── CONTRIBUTING.md
├── README.md
├── airbyte_dbt_airflow_bigquery/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ └── ecommerce/
│ │ │ ├── marts/
│ │ │ │ ├── product_popularity.sql
│ │ │ │ ├── purchase_patterns.sql
│ │ │ │ ├── schema.yml
│ │ │ │ └── user_demographics.sql
│ │ │ ├── sources/
│ │ │ │ └── faker_sources.yml
│ │ │ └── staging/
│ │ │ ├── schema.yml
│ │ │ ├── stg_products.sql
│ │ │ ├── stg_purchases.sql
│ │ │ └── stg_users.sql
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ ├── .gitkeep
│ │ │ ├── raw_customers.csv
│ │ │ ├── raw_orders.csv
│ │ │ └── raw_payments.csv
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ ├── terraform.tfvars
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── .gitignore
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── airflow/
│ │ │ ├── config/
│ │ │ │ └── dbt_config.py
│ │ │ ├── dags/
│ │ │ │ └── elt_dag.py
│ │ │ └── plugins/
│ │ │ ├── custom_docs_plugin.py
│ │ │ ├── dbt_upload_docs.py
│ │ │ ├── static/
│ │ │ │ └── .gitkeep
│ │ │ └── templates/
│ │ │ └── dbt/
│ │ │ └── .gitkeep
│ │ ├── docker-compose.yaml
│ │ └── requirements.txt
│ └── setup.py
├── airbyte_dbt_airflow_snowflake/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── example/
│ │ │ │ ├── my_first_dbt_model.sql
│ │ │ │ ├── my_second_dbt_model.sql
│ │ │ │ └── schema.yml
│ │ │ └── sources.yml
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── .gitignore
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── airflow/
│ │ │ ├── dags/
│ │ │ │ └── my_elt_dag.py
│ │ │ └── plugins/
│ │ │ ├── static/
│ │ │ │ └── .gitkeep
│ │ │ └── templates/
│ │ │ └── dbt/
│ │ │ └── .gitkeep
│ │ ├── docker-compose.yaml
│ │ └── requirements.txt
│ └── setup.py
├── airbyte_dbt_dagster/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ └── example/
│ │ │ ├── my_first_dbt_model.sql
│ │ │ ├── my_second_dbt_model.sql
│ │ │ └── schema.yml
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── airbyte_dbt_dagster_snowflake/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── example/
│ │ │ │ ├── my_first_dbt_model.sql
│ │ │ │ ├── my_second_dbt_model.sql
│ │ │ │ └── schema.yml
│ │ │ └── sources.yml
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── airbyte_dbt_prefect_bigquery/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ ├── product_popularity.sql
│ │ │ │ ├── purchase_patterns.sql
│ │ │ │ └── user_demographics.sql
│ │ │ ├── sources/
│ │ │ │ └── faker_sources.yml
│ │ │ └── staging/
│ │ │ ├── stg_products.sql
│ │ │ ├── stg_purchases.sql
│ │ │ └── stg_users.sql
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ └── my_elt_flow.py
│ └── setup.py
├── airbyte_dbt_prefect_snowflake/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── example/
│ │ │ │ ├── my_first_dbt_model.sql
│ │ │ │ ├── my_second_dbt_model.sql
│ │ │ │ └── schema.yml
│ │ │ └── sources.yml
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ └── my_elt_flow.py
│ └── setup.py
├── airbyte_dbt_snowflake_looker/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── example/
│ │ │ │ ├── my_first_dbt_model.sql
│ │ │ │ ├── my_second_dbt_model.sql
│ │ │ │ └── schema.yml
│ │ │ └── sources.yml
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── airbyte_lib_notebooks/
│ ├── AirbyteLib_Basic_Features_Demo.ipynb
│ ├── AirbyteLib_CoinAPI_Demo.ipynb
│ ├── AirbyteLib_GA4_Demo.ipynb
│ ├── AirbyteLib_Github_Incremental_Demo.ipynb
│ ├── PyAirbyte_Postgres_Custom_Cache_Demo.ipynb
│ ├── PyAirbyte_Shopify_Demo.ipynb
│ └── README.md
├── airbyte_s3_pinecone_rag/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── dbt_project.yml
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ └── purchase_data.sql
│ │ │ ├── sources/
│ │ │ │ └── s3.source.yml
│ │ │ └── staging/
│ │ │ └── stg_purchases.sql
│ │ └── profiles.yml
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── output.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── query.py
│ ├── quickstart.md
│ └── setup.py
├── api_to_warehouse/
│ ├── .gitignore
│ ├── Readme.md
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ └── setup.py
├── customer_segmentation_analytics_shopify/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ ├── customer_activity_analysis.py
│ │ │ ├── purchase_pattern_segmentation_analysis.py
│ │ │ └── rfm_segmentation_analysis.py
│ │ ├── dbt_project.yml
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ ├── customer_activity.sql
│ │ │ │ ├── purchase_pattern_segmentation.sql
│ │ │ │ └── rfm_segmentation.sql
│ │ │ ├── sources/
│ │ │ │ └── shopify_sources.yml
│ │ │ └── staging/
│ │ │ ├── stg_customers.sql
│ │ │ └── stg_transactions.sql
│ │ └── profiles.yml
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── data_to_pinecone_llm/
│ ├── .gitignore
│ ├── .vscode/
│ │ └── quickstart.code-workspace
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── notion.source.yml
│ │ │ └── notion_data.sql
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── output.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── query.py
│ ├── quickstart.md
│ ├── secrets/
│ │ ├── .gitignore
│ │ └── README.md
│ └── setup.py
├── database_snapshot/
│ ├── .gitignore
│ ├── README.md
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── connections/
│ │ │ ├── main.tf
│ │ │ ├── provider.tf
│ │ │ └── variables.tf
│ │ ├── destinations/
│ │ │ ├── main.tf
│ │ │ ├── outputs.tf
│ │ │ ├── provider.tf
│ │ │ └── variables.tf
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ ├── sources/
│ │ │ ├── main.tf
│ │ │ ├── outputs.tf
│ │ │ ├── provider.tf
│ │ │ └── variables.tf
│ │ └── variables.tf
│ └── setup.py
├── developer_productivity_analytics_github/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── dbt_project.yml
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ ├── avarage_time_to_merge_pr_analysis.sql
│ │ │ │ ├── commits_over_time_per_dev_analysis.sql
│ │ │ │ ├── dev_activity_by_day_of_week_analysis.sql
│ │ │ │ ├── dev_collaboration_network_analysis.sql
│ │ │ │ ├── freq_of_code_contribution_analysis.sql
│ │ │ │ ├── no_of_code_reviews_per_dev_analysis.sql
│ │ │ │ ├── no_of_commits_per_dev_per_repo_analysis.sql
│ │ │ │ ├── no_of_pr_per_dev_analysis.sql
│ │ │ │ ├── number_of_pr_open_or_closed.sql
│ │ │ │ ├── top_collaborators_by_repo_analysis.sql
│ │ │ │ └── track_issues_assigned_by_dev_analysis.sql
│ │ │ ├── sources/
│ │ │ │ └── github_source.yml
│ │ │ └── staging/
│ │ │ ├── stg_branches.sql
│ │ │ ├── stg_collaborators.sql
│ │ │ ├── stg_comments.sql
│ │ │ ├── stg_commits.sql
│ │ │ ├── stg_issues.sql
│ │ │ ├── stg_organizations.sql
│ │ │ ├── stg_pull_requests.sql
│ │ │ ├── stg_repositories.sql
│ │ │ ├── stg_review_comments.sql
│ │ │ ├── stg_reviews.sql
│ │ │ ├── stg_stargazers.sql
│ │ │ ├── stg_tags.sql
│ │ │ ├── stg_teams.sql
│ │ │ └── stg_users.sql
│ │ ├── profiles.yml
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── ecommerce_analytics_bigquery/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ ├── product_popularity.sql
│ │ │ │ ├── purchase_patterns.sql
│ │ │ │ └── user_demographics.sql
│ │ │ ├── sources/
│ │ │ │ └── faker_sources.yml
│ │ │ └── staging/
│ │ │ ├── stg_products.sql
│ │ │ ├── stg_purchases.sql
│ │ │ └── stg_users.sql
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── elt_simplified_stack/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── dbt_project.yml
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ ├── commits-per-repo.sql
│ │ │ │ ├── pr-per-dev.sql
│ │ │ │ └── pr-per-status.sql
│ │ │ ├── sources/
│ │ │ │ └── github_source.yml
│ │ │ └── staging/
│ │ │ ├── stg_commits.sql
│ │ │ └── stg_pull_requests.sql
│ │ ├── profiles.yml
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ └── my_elt_flow.py
│ └── setup.py
├── error_analysis_stack_sentry/
│ ├── Readme.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── example/
│ │ │ │ ├── Insight_Table.sql
│ │ │ │ └── schema.yml
│ │ │ └── sources.yml
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── error_analysis_stack.egg-info/
│ │ ├── PKG-INFO
│ │ ├── SOURCES.txt
│ │ ├── dependency_links.txt
│ │ ├── requires.txt
│ │ └── top_level.txt
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── orchestration.egg-info/
│ │ │ ├── PKG-INFO
│ │ │ ├── SOURCES.txt
│ │ │ ├── dependency_links.txt
│ │ │ ├── requires.txt
│ │ │ └── top_level.txt
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── github_insight_stack/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── Readme.md
│ │ │ ├── sources.yml
│ │ │ └── test-models/
│ │ │ ├── code_quality.sql
│ │ │ ├── collaboration_patterns.sql
│ │ │ └── project_health.sql
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── low_latency_data_availability/
│ ├── .gitignore
│ ├── README.md
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ └── setup.py
├── mongodb_mysql_integration/
│ ├── .gitignore
│ ├── README.md
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ └── setup.py
├── multisource_aggregation/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── macros/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── example/
│ │ │ │ ├── my_first_dbt_model.sql
│ │ │ │ ├── my_second_dbt_model.sql
│ │ │ │ └── schema.yml
│ │ │ └── sources.yml
│ │ ├── profiles.yml
│ │ ├── seeds/
│ │ │ └── .gitkeep
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── connections/
│ │ │ ├── main.tf
│ │ │ ├── provider.tf
│ │ │ └── variables.tf
│ │ ├── destination_warehouse/
│ │ │ ├── main.tf
│ │ │ ├── outputs.tf
│ │ │ ├── provider.tf
│ │ │ └── variables.tf
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ ├── source_databases/
│ │ │ ├── main.tf
│ │ │ ├── outputs.tf
│ │ │ ├── provider.tf
│ │ │ └── variables.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── mysql_to_postgres_incremental_stack/
│ ├── .gitignore
│ ├── README.md
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ └── setup.py
├── outdoor_activity_analytics_recreation/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analyses/
│ │ │ ├── campsite_availability_analysis.py
│ │ │ ├── campsite_type_analysis.py
│ │ │ ├── count_recareas_by_activity_analysis.py
│ │ │ └── most_common_activities_in_recareas_analysis.py
│ │ ├── dbt_project.yml
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ ├── campsite_availability_over_time.sql
│ │ │ │ ├── campsite_type_counts.sql
│ │ │ │ ├── count_recarea_by_activity_analysis.sql
│ │ │ │ └── most_common_activities_in_recareas.sql
│ │ │ ├── sources/
│ │ │ │ └── recreation_source.yml
│ │ │ └── staging/
│ │ │ ├── stg_activities.sql
│ │ │ ├── stg_campsites.sql
│ │ │ ├── stg_facilities.sql
│ │ │ └── stg_recreationareas.sql
│ │ └── profiles.yml
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── postgres_data_replication/
│ ├── .gitignore
│ ├── README.md
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ └── setup.py
├── postgres_snowflake_integration/
│ ├── .gitignore
│ ├── README.md
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ └── setup.py
├── postgres_to_mysql_migration/
│ ├── .gitignore
│ ├── README.md
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ └── setup.py
├── pyairbyte_notebooks/
│ ├── AI ChatBot - 1.0 Launch Demo.ipynb
│ ├── Chatoverpolygonstockdata_langchain.ipynb
│ ├── PyAirbyte_Apify_Demo.ipynb
│ ├── PyAirbyte_Basic_Features_Demo.ipynb
│ ├── PyAirbyte_CoinAPI_Demo.ipynb
│ ├── PyAirbyte_Document_Creation_RAG_with_Langchain_Demo.ipynb
│ ├── PyAirbyte_GA4_Demo.ipynb
│ ├── PyAirbyte_Github_Incremental_Demo.ipynb
│ ├── PyAirbyte_Postgres_Custom_Cache_Demo.ipynb
│ ├── PyAirbyte_Shopify_Demo.ipynb
│ ├── PyAirbyte_Snowflake_Cortex_Github.ipynb
│ ├── PyAirbyte_Snowflake_Custom_Cache_Demo.ipynb
│ ├── PyAirbyte_as_an_Orchestrator_Demo.ipynb
│ ├── RAG_using_github_pyairbyte_chroma.ipynb
│ ├── README.md
│ ├── rag_using_gdrive_pyairbyte_pinecone.ipynb
│ ├── rag_using_github_pyairbyte_weaviate.ipynb
│ ├── rag_using_gitlab_pyairbyte_qdrant.ipynb
│ ├── rag_using_jira_pyairbyte_pinecone.ipynb
│ ├── rag_using_s3_pyairbyte_pinecone.ipynb
│ ├── rag_using_shopify_pyairbyte_langchain.ipynb
│ ├── rag_with_fb_marketing_milvus_lite.ipynb
│ ├── rag_with_pyairbyte_and_milvus_lite.ipynb
│ ├── sentiment_analysis_airbyte_gsheets_snowflakecortex.ipynb
│ └── using_langchain_airbyte_package.ipynb
├── satisfaction_analytics_zendesk_support/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── analysis/
│ │ │ └── .gitkeep
│ │ ├── dbt_project.yml
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ ├── analyze_satisfaction_score_over_time.sql
│ │ │ │ ├── avarage_satisfaction_rating.sql
│ │ │ │ ├── feedback_analysis_for_low_score.sql
│ │ │ │ └── trend_analysis_by_score.sql
│ │ │ ├── sources/
│ │ │ │ └── zendesk_support_sources.yml
│ │ │ └── staging/
│ │ │ ├── stg_brands.sql
│ │ │ ├── stg_groups.sql
│ │ │ ├── stg_organizations.sql
│ │ │ ├── stg_satisfaction_ratings.sql
│ │ │ ├── stg_tags.sql
│ │ │ ├── stg_ticket_audits.sql
│ │ │ ├── stg_ticket_comments.sql
│ │ │ ├── stg_ticket_fields.sql
│ │ │ ├── stg_ticket_forms.sql
│ │ │ ├── stg_ticket_metric_events.sql
│ │ │ ├── stg_ticket_metrics.sql
│ │ │ ├── stg_tickets.sql
│ │ │ └── stg_users.sql
│ │ ├── profiles.yml
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ ├── setup.py
│ │ ├── tmp3ks7pwhz/
│ │ │ └── storage/
│ │ │ ├── ad21fadd-c131-4a7c-98a7-fa5ad3a929de/
│ │ │ │ └── compute_logs/
│ │ │ │ ├── mdvhnoik.complete
│ │ │ │ ├── mdvhnoik.err
│ │ │ │ ├── mdvhnoik.out
│ │ │ │ ├── uzgmeijp.complete
│ │ │ │ ├── uzgmeijp.err
│ │ │ │ └── uzgmeijp.out
│ │ │ └── f7507115-918d-443f-ab91-a065e84fa403/
│ │ │ └── compute_logs/
│ │ │ ├── aeebjmfa.complete
│ │ │ ├── aeebjmfa.err
│ │ │ ├── aeebjmfa.out
│ │ │ ├── zqbkkiww.complete
│ │ │ ├── zqbkkiww.err
│ │ │ └── zqbkkiww.out
│ │ └── tmpb3ctnsbk/
│ │ └── storage/
│ │ ├── 0bc4e544-546d-44df-b79c-e75413c56ecb/
│ │ │ └── compute_logs/
│ │ │ ├── xozgecli.complete
│ │ │ ├── xozgecli.err
│ │ │ ├── xozgecli.out
│ │ │ ├── yyxjctam.complete
│ │ │ ├── yyxjctam.err
│ │ │ └── yyxjctam.out
│ │ └── 1eac78ed-12d1-4147-9c48-79b27dd586ed/
│ │ └── compute_logs/
│ │ ├── iqvvuhde.complete
│ │ ├── iqvvuhde.err
│ │ ├── iqvvuhde.out
│ │ ├── izklbfmq.complete
│ │ ├── izklbfmq.err
│ │ └── izklbfmq.out
│ └── setup.py
├── shopping_cart_analytics_shopify/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── dbt_project.yml
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ ├── abandoned_checkout_ratio.sql
│ │ │ │ ├── location_based_abandoned_checkouts.sql
│ │ │ │ ├── most_abandoned_products.sql
│ │ │ │ └── time_based.sql
│ │ │ ├── sources/
│ │ │ │ └── shopify_source.yml
│ │ │ └── staging/
│ │ │ └── stg_abandoned_checkouts.sql
│ │ └── profiles.yml
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── ticket_volume_analytics_zendesk_support/
│ ├── .gitignore
│ ├── README.md
│ ├── dbt_project/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── dbt_project.yml
│ │ ├── models/
│ │ │ ├── marts/
│ │ │ │ ├── busier_day_of_week_analysis.sql
│ │ │ │ ├── pattern_and_trend_analysis.sql
│ │ │ │ ├── seasonal_analysis.sql
│ │ │ │ ├── ticket_priority_analysis.sql
│ │ │ │ ├── ticket_resolution_time_analysis.sql
│ │ │ │ ├── ticket_source_analysis.sql
│ │ │ │ └── ticket_volume_analysis.sql
│ │ │ ├── sources/
│ │ │ │ └── zendesk_support_sources.yml
│ │ │ └── staging/
│ │ │ ├── stg_schedules.sql
│ │ │ ├── stg_ticket_metrics.sql
│ │ │ ├── stg_tickets.sql
│ │ │ └── stg_users.sql
│ │ ├── profiles.yml
│ │ ├── snapshots/
│ │ │ └── .gitkeep
│ │ └── tests/
│ │ └── .gitkeep
│ ├── infra/
│ │ ├── .gitignore
│ │ └── airbyte/
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── orchestration/
│ │ ├── orchestration/
│ │ │ ├── __init__.py
│ │ │ ├── assets.py
│ │ │ ├── constants.py
│ │ │ ├── definitions.py
│ │ │ └── schedules.py
│ │ ├── pyproject.toml
│ │ └── setup.py
│ └── setup.py
├── vector_store_integration/
│ ├── AI_assistant_streamlit_app/
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── app.py
│ │ └── requirements.txt
│ ├── RAG_using_PGVector.ipynb
│ ├── RAG_using_Snowflake_Cortex.ipynb
│ └── RAG_using_Vectara.ipynb
└── weather_data_stack/
├── .gitignore
├── README.md
├── dbt_project/
│ ├── .gitignore
│ ├── README.md
│ ├── analyses/
│ │ └── .gitkeep
│ ├── dbt_project.yml
│ ├── macros/
│ │ └── .gitkeep
│ ├── models/
│ │ ├── marts/
│ │ │ └── historial_weather_trends.sql
│ │ ├── sources/
│ │ │ └── weatherstack_source.yml
│ │ └── staging/
│ │ └── stg_current_weather.sql
│ ├── profiles.yml
│ ├── seeds/
│ │ └── .gitkeep
│ ├── snapshots/
│ │ └── .gitkeep
│ └── tests/
│ └── .gitkeep
├── orchestration/
│ ├── orchestration/
│ │ ├── __init__.py
│ │ ├── assets.py
│ │ ├── constants.py
│ │ ├── definitions.py
│ │ └── schedules.py
│ ├── pyproject.toml
│ └── setup.py
└── setup.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .devcontainer/README.md
================================================
# `.devcontainer` Config
This directory houses a set of Dev Container config files, which streamline contributions from team and community members.
## Developing in the Browser using Codespaces
GitHub Codespaces allows maintainers and contributors to launch directly into a web browser window that hosts the VS Code IDE.
## Container Prebuild Optimizations
Prebuilds of these dev containers can significantly speed up launch times.
## Sharing Codespace Links
Per the [GitHub Docs](https://docs.github.com/en/codespaces/setting-up-your-project-for-codespaces/setting-up-your-repository/facilitating-quick-creation-and-resumption-of-codespaces#creating-a-link-to-the-codespace-creation-page-for-your-repository), you can:
- Create a codespace for the default branch:
- [`https://codespaces.new/airbytehq/quickstarts`](https://codespaces.new/airbytehq/quickstarts)
- Create a codespace for a specific branch of the repository:
- `https://codespaces.new/airbytehq/quickstarts/tree/BRANCH-NAME`
- `https://codespaces.new/FORK-NAME/quickstarts/tree/BRANCH-NAME`
- E.g. https://codespaces.new/aaronsteers/quickstarts/tree/aj%2Ffeat%2Fdevcontainers
- Create a codespace for a pull request:
- https://codespaces.new/airbytehq/quickstarts/pull/PR-NUMBER
================================================
FILE: .devcontainer/devcontainer.json
================================================
// This is a generic devcontainer definition for working with Quickstarts.
//
// Included in this devcontainer:
// - Python (3.10)
// - Terraform CLI
// - dbt (BigQuery variant)
// - Docker-In-Docker support (DinD)
// - Various VS Code extensions supporting the above 👆
{
"name": "Airbyte Quickstarts Dev Container (Generic)",
// For general devcontainer config, see: https://aka.ms/devcontainer.json
// For Python-specific options, see: https://github.com/devcontainers/templates/tree/main/src/python
"image": "mcr.microsoft.com/devcontainers/python:0-3.10",
"features": {
// Features to add to the dev container.
// More info: https://containers.dev/features.
"ghcr.io/devcontainers/features/docker-in-docker:2": {},
"ghcr.io/devcontainers-contrib/features/poetry:2": {},
"ghcr.io/devcontainers/features/terraform": {},
"ghcr.io/devcontainers-contrib/features/pipx-package:1": {
"package": "dbt-bigquery",
"version": "1.7.2",
"interpreter": "python3",
"includeDeps": true // ...because dbt-query doesn't directly surface the dbt CLI
}
},
"overrideFeatureInstallOrder": [
// Strict ordering gives best chance of cache reuse.
// Put things that aren't changing at top of list:
"ghcr.io/devcontainers/features/docker-in-docker:2",
"ghcr.io/devcontainers/features/terraform",
"ghcr.io/devcontainers-contrib/features/poetry:2",
"ghcr.io/devcontainers-contrib/features/pipx-package:1"
],
// Configure tool-specific properties.
"customizations": {
"vscode": {
"extensions": [
// Python extensions:
"charliermarsh.ruff",
"ms-python.black-formatter",
"ms-python.mypy-type-checker",
"ms-python.python",
"ms-python.vscode-pylance",
"ms-toolsai.jupyter",
// Toml support:
"tamasfe.even-better-toml",
// Yaml and JSON Schema support:
"redhat.vscode-yaml",
// Contributing:
"GitHub.vscode-pull-request-github"
],
"settings": {
"extensions.ignoreRecommendations": true,
"git.autofetch": true,
"git.openRepositoryInParentFolders": "always",
"python.defaultInterpreterPath": ".venv/bin/python",
"python.interpreter.infoVisibility": "always",
"python.terminal.activateEnvironment": true,
"python.testing.pytestEnabled": true
}
}
},
"containerEnv": {
"POETRY_VIRTUALENVS_IN_PROJECT": "true"
}
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "pip3 install --user -r requirements.txt",
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "root"
}
================================================
FILE: .gitignore
================================================
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
#Desktop Services Store
.DS_Store
# PyAirbyte caches and virtual environments
.cache
.venv*
================================================
FILE: CONTRIBUTING.md
================================================
# Contributing to Airbyte Quickstarts
Thank you for considering contributing to Airbyte Quickstarts! 🌟 It’s people like you that make this project valuable for the community. Whether it’s fixing bugs or adding new Quickstarts, we welcome your contributions.
## How Can I Contribute?
### 1. Reporting Bugs
- First, check the [Issues](https://github.com/airbytehq/quickstarts/issues) to see if the bug has already been reported.
- If it hasn’t, [open a new issue](https://github.com/airbytehq/quickstarts/issues/new), providing a descriptive title and a clear description.
### 2. Suggesting Enhancements
- Before suggesting enhancements, please read the [documentation](https://github.com/airbytehq/quickstarts/blob/main/README.md) and check the [Issues](https://github.com/airbytehq/quickstarts/issues) to see if it has been discussed before.
- If it hasn’t, [open a new issue](https://github.com/airbytehq/quickstarts/issues/new), providing a descriptive title, detailed description, and use case.
### 3. Submitting Changes and New Quickstarts
1. **Fork the Repository**: Create your own fork of the [quickstarts repository](https://github.com/airbytehq/quickstarts).
2. **Clone the Repository**: Clone your forked repository to your local machine.
```sh
git clone https://github.com/your-username/quickstarts.git
```
3. **Create a Branch**: Create a new branch from `main` for your changes.
```sh
git checkout -b feature/my-new-feature
```
4. **Make Changes**: Make your changes or additions to the new branch.
5. **Commit Changes**: Commit your changes with a clear and descriptive commit message.
```sh
git commit -m "Add a new quickstart"
```
6. **Push Changes**: Push your changes to your fork on GitHub.
```sh
git push origin feature/my-new-feature
```
7. **Submit a Pull Request**: Go to the [Pull Requests](https://github.com/airbytehq/quickstarts/pulls) of the original repository and create a new pull request. Provide a clear description of the changes and reference any related issues.
### 4. Notes for New Quickstarts
1. **Create it in a New Directory**: Each Quickstart should live in its own directory and be a standalone project.
2. **Add a README.md**: All Quickstarts should have clear and detailed instructions about how to set them up.
## Style Guides
- Write clean and simple code, following the existing code structure and naming conventions.
- For Markdown files, adhere to [Markdown Guide](https://www.markdownguide.org/extended-syntax/).
- Include comments with clear explanations of your code.
- Update the documentation (README.md) if needed, to reflect the changes made.
## Review Process
Once your pull request is submitted, maintainers will review it. They may ask for additional changes or clarifications. Once the pull request is approved, it will be merged into the main branch.
## Contact
For questions or help with the contributing process, please reach out in the #hackathons channel in the [Airbyte Slack](https://airbytehq.slack.com/).
Thank you for contributing to Airbyte Quickstarts! 🚀
================================================
FILE: README.md
================================================
# Airbyte Quickstarts
Welcome to Airbyte Quickstarts! This repository provides various templates to help you quickly build your data stack tailored to different domains like Marketing, Product, Finance, Operations, and more.
## Objective
To empower data teams by providing ready-to-use code templates, enabling the swift and efficient deployment of data stacks with minimal configuration.
## How To Start?
1. **Choose a Template**: Navigate to the Quickstart that suits your needs. Each folder in this repository is a Quickstart and can be used as a standalone project.
2. **Follow Setup Instructions**: Each Quickstart contains a `README.md` file with step-by-step instructions to set up the stack.
3. **Customize**: Modify the Quickstart as needed to suit your specific requirements.
## List Of Available Quickstarts
- [Airbyte, dbt, Airflow and BigQuery E-commerce Stack](./airbyte_dbt_airflow_bigquery)
- [Airbyte, dbt, Airflow and Snowflake Basic Stack](./airbyte_dbt_airflow_snowflake)
- [Airbyte, dbt, Dagster and BigQuery Basic Stack](./airbyte_dbt_dagster)
- [Airbyte, dbt, Dagster and Snowflake Basic Stack](./airbyte_dbt_dagster_snowflake)
- [Airbyte, dbt, Prefect and BigQuery (PAD) Stack](./airbyte_dbt_prefect_bigquery)
- [Airbyte, dbt, Prefect and Snowflake Basic Stack](./airbyte_dbt_prefect_snowflake)
- [Airbyte, dbt, Snowflake and Looker Basic Stack](./airbyte_dbt_snowflake_looker)
- [API to Data Warehouse Integration Stack](./api_to_warehouse)
- [Customer Satisfaction Analytics Stack With Zendesk Support, Airbyte, dbt, Dagster and BigQuery](./satisfaction_analytics_zendesk_support)
- [Customer Ticket Volume Analytics Stack With Zendesk Support, Airbyte, dbt, Dagster and BigQuery](./ticket_volume_analytics_zendesk_support)
- [Database Snapshot Stack](./database_snapshot)
- [E-commerce Analytics with Airbyte, dbt, Dagster and BigQuery](./ecommerce_analytics_bigquery)
- [ELT simplified stack with Airbyte, dbt, Prefect, Github and Bigquery](./elt_simplified_stack)
- [Github Insight Stack with Airbyte, dbt, Dagster and BigQuery](./github_insight_stack)
- [Low-Latency Data Availability Stack](./low_latency_data_availability)
- [MongoDB MySQL Integration Stack](./mongodb_mysql_integration)
- [Multisource Database Aggregation Stack](./multisource_aggregation)
- [Postgres Data Replication Stack](./postgres_data_replication)
- [Postgres to MySQL Database Migration Stack](./postgres_to_mysql_migration)
- [Postgres to Snowflake Data Integration](./postgres_snowflake_integration)
## Contribution Guidelines
We highly encourage community contributions to help improve, expand and add new Quickstarts! Please read our [Contribution Guidelines](CONTRIBUTING.md) before making a submission.
If you're looking to contribute with a new Quickstart, you can look for inspiration in the [Issues](https://github.com/airbytehq/quickstarts/issues) tab. There, we keep a list of our most wanted Quickstarts and often offer rewards for contributions, for example, during our different Hackathons.
## Contact
For questions or help with the contributing process, please reach out in the #hackathons channel in the [Airbyte Slack](https://airbytehq.slack.com/).
================================================
FILE: airbyte_dbt_airflow_bigquery/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: airbyte_dbt_airflow_bigquery/README.md
================================================
# E-commerce Analytics Stack with Airbyte, dbt, Airflow (ADA) and BigQuery
Welcome to the Airbyte, dbt and Airflow (ADA) Stack with BigQuery quickstart! This repo contains the code to show how to utilize Airbyte and dbt for data extraction and transformation, and implement Apache Airflow to orchestrate the data workflows, providing an end-to-end ELT pipeline. With this setup, you can pull fake e-commerce data, put it into BigQuery, and play around with it using dbt and Airflow.
Here's the diagram of the end to end data pipeline you will build, from the Airflow DAG Graph view:

And here are the transformations happening when the dbt DAG is executed:

## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors](#3-setting-up-airbyte-connectors)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Setting Up Airflow](#5-setting-up-airflow)
- [Orchestrating with Airflow](#6-orchestrating-with-airflow)
- [Next Steps](#7-next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte locally. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform (Optional)**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli). This is an optional step because you can also create and manage Airbyte resources via the UI. Both ways will be described below.
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add airbyte_dbt_airflow_bigquery
```
2. **Navigate to the directory**:
```bash
cd airbyte_dbt_airflow_bigquery
```
At this point you can view the code in your preferred IDE.
The next steps are only necessary if you want to develop or test the dbt models locally, since Airbyte and Airflow are running on Docker.
3. **Set up a virtual environment**:
You can use the following commands, just make sure to adapt to your specific python installation.
- For Linux and Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting up BigQuery
1. **Create a Google Cloud project**:
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
2. **Create BigQuery datasets**:
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
3. **Create a Service Account and Assign Roles**:
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account.
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
4. **Generate a JSON key for the Service Account**:
- Make a JSON key to let the service account sign in.
**How to generate a JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
## 3. Setting Up Airbyte Connectors
To set up your Airbyte connectors, you can choose to do it via Terraform, or the UI. Choose one of the two following options.
### 3.1. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations via Terraform, facilitating data synchronization between various platforms. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs:
- Provide credentials for your BigQuery connection in the `main.tf` file.
- `dataset_id`: The name of the BigQuery dataset where Airbyte will load data. In this case, enter “raw_data”.
- `project_id`: Your BigQuery project ID.
- `credentials_json`: The contents of the service account JSON file. You should input a string, so you need to convert the JSON content to string beforehand.
- `workspace_id`: Your Airbyte workspace ID, which can be found in the webapp url. For example, in this url: http://localhost:8000/workspaces/910ab70f-0a67-4d25-a983-999e99e1e395/ the workspace id would be `910ab70f-0a67-4d25-a983-999e99e1e395`.
- Alternatively, you can utilize the `variables.tf` file to manage these credentials:
- You’ll be prompted to enter the credentials when you execute `terraform plan` and `terraform apply`. If going for this option, just move to the next step. If you don’t want to use variables, remove them from the file.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go 🎉.
### 3.2. Setting Up Airbyte Connectors Using the UI
Start by launching the Airbyte UI by going to http://localhost:8000/ in your browser. Then:
1. **Create a source**:
- Go to the Sources tab and click on `+ New source`.
- Search for “faker” using the search bar and select `Sample Data (Faker)`.
- Adjust the Count and optional fields as needed for your use case. You can also leave as is.
- Click on `Set up source`.
2. **Create a destination**:
- Go to the Destinations tab and click on `+ New destination`.
- Search for “bigquery” using the search bar and select `BigQuery`.
- Enter the connection details as needed.
- For simplicity, you can use `Standard Inserts` as the loading method.
- In the `Service Account Key JSON` field, enter the contents of the JSON file. Yes, the full JSON.
- Click on `Set up destination`.
3. **Create a connection**:
- Go to the Connections tab and click on `+ New connection`.
- Select the source and destination you just created.
- Enter the connection details as needed.
- Click on `Set up connection`.
That’s it! Your connection is set up and ready to go! 🎉
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Move to the directory containing the dbt configuration:
```bash
cd ../../dbt_project
```
2. **Update Connection Details**:
- You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details. Specifically, you need to update the Service Account JSON file path, the dataset location and your BigQuery project ID.
- Provide your BigQuery project ID in the `database` field of the `/models/ecommerce/sources/faker_sources.yml` file.
If you want to avoid hardcoding credentials in the `profiles.yml` file, you can leverage environment variables. Here's an example: `keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"`
3. **Test the Connection (Optional)**:
You can test the connection to your BigQuery instance using the following command. Just take into account that you would need to provide the local path to your service account key file instead.
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery 🎉.
## 5. Setting Up Airflow
Let's set up Airflow for our project, following the steps below. We are basing our setup on the Running Airflow in Docker guide, with some customizations:
1. **Navigate to the Orchestration Directory**:
```bash
cd ../orchestration
```
2. **Set Environment Variables**:
- Open the `.env.example` file located in the `orchestration` directory.
- Update the necessary fields, paying special attention to the `GCP_SERVICE_ACCOUNT_PATH`, which should point to your local service account JSON key directory path.
- Rename the file from `.env.example` to `.env` after filling in the details.
3. **Build the custom Airflow image**:
```bash
docker compose build
```
4. **Launch the Airflow container**:
```bash
docker compose up
```
This might take a few minutes initially as it sets up necessary databases and metadata.
5. **Setting up Airflow Connections**:
Both for using Airbyte and dbt, we need to set up connections in Airflow:
- Access the Airflow UI by navigating to `http://localhost:8080` in your browser. The default username and password are both `airflow`, unless you changed them in the `.env` file.
- Go to the "Admin" > "Connections" tab.
**5.1. Create Airbyte Connection**:
Click on the `+` button to create a new connection and fill in the following details to create an Airbyte connection:
- **Connection Id**: The name of the connection, this will be used in the DAGs responsible for triggering Airbyte syncs. Name it `airbyte_connection`.
- **Connection Type**: The type of the connection. In this case, select `Airbyte`.
- **Host**: The host of the Airbyte instance. Since we're running it locally, use `airbyte-proxy`, which is the name of the container running Airbyte. In case you have a remote instance, you can use the URL of the instance.
- **Port**: The port of the Airbyte instance. By default the API is exposed on port `8001`.
- **Login**: If you're using the proxy (it's used by default in the official Airbyte Docker Compose file), this is required. By default it's `airbyte`.
- **Password**: If you're using the proxy (it's used by default in the official Airbyte Docker Compose file), this is required. By default it's `password`.
Click on the `Test` button, and make sure you get a `Connection successfully tested` message at the top. Then, you can `Save` the connection.
**5.2. Create Google Cloud (BigQuery) connection**:
Click on the `+` button to create a new connection and fill in the following details to create a Google Cloud connection:
- **Connection Id**: The name of the connection, this one will be used in the DAGs responsible for triggering dbt runs. Name it `dbt_file_connection`.
- **Connection Type**: The type of the connection. Select `Google Cloud` from the drop down menu.
- **Project ID**: The Google Cloud project ID.
- **Keyfile path**: The path to the service account key file. In this case, it's mounted to `/opt/airflow/service_accounts/[your-service-account-key-file].json`.
- Alternatively, you can use the **Keyfile JSON** field and paste the contents of the key file.
Click on the `Test` button, and make sure you get a `Connection successfully tested` message at the top. Then, you can `Save` the connection.
6. **Integrate dbt with Airflow**:
We use [Astronomer Cosmos](https://astronomer.github.io/astronomer-cosmos/) to integrate dbt with Airflow. This library parses DAGs and Task Groups from dbt models, and allows us to use Airflow connections instead of dbt profiles. Additionally, it runs tests automatically after each model is completed. To set it up, we've created the file `orchestration/airflow/config/dbt_config.py` with the necessary configurations.
Update the following in the `dbt_config.py` file, if necessary:
- The `location` key inside `google_config` with the location of your BigQuery `transformed_data` dataset, if it's not `US`.
- The method used to create the `google_config`. The code uses the `GoogleCloudServiceAccountFileProfileMapping` method, assuming that the Google Cloud connection in Airflow was created using the *Keyfile Path*. If you used the *Keyfile JSON*, you should use the `GoogleCloudServiceAccountDictProfileMapping` method instead.
7. **Link Airbyte connection to the Airflow DAG**:
The last step before being able to execute the DAG in Airflow is to include the `connection_id` from Airbyte:
- Visit the Airbyte UI at http://localhost:8000/.
- In the "Connections" tab, select the "Faker to BigQuery" connection and copy its connection id from the URL.
- Update the `connection_id` in the `extract_data` task within `orchestration/airflow/dags/elt_dag.py` with this id.
That's it! Airflow has been configured to work with dbt and Airbyte. 🎉
## 6. Orchestrating with Airflow
Now that everything is set up, it's time to run your data pipeline!
- In the Airflow UI, go to the "DAGs" section.
- Locate `elt_dag` and click on "Trigger DAG" under the "Actions" column.
This will initiate the complete data pipeline, starting with the Airbyte sync from Faker to BigQuery, followed by dbt transforming the raw data into `staging` and `marts` models. As the last step, it generates dbt docs.
- Confirm the sync status in the Airbyte UI.
- After dbt jobs completion, check the BigQuery console to see the newly created views in the `transformed_data` dataset.
- Once the dbt pipeline completes, you can check the dbt docs from the Airflow UI by going to the "Custom Docs" > "dbt" tab.
Congratulations! You've successfully run an end-to-end workflow with Airflow, dbt and Airbyte. 🎉
## 7. Next Steps
Once you've gone through the steps above, you should have a working Airbyte, dbt and Airflow (ADA) Stack with BigQuery. You can use this as a starting point for your project, and adapt it to your needs. There are lots of things you can do beyond this point, and these tools are evolving fast and adding new features almost every week. Here are some ideas to continue your project:
1. **Expand your data sources**:
This quickstart uses a very simple data source. Airbyte provides hundreds of sources that might be integrated into your pipeline. And besides configuring and orchestrating them, don't forget to add them as sources in your dbt project. This will make sure you have a lineage graph like the one we showed in the beginning of this document.
2. **Dive into dbt and improve your transformations**:
dbt is a very powerful tool, and it has lots of features that can help you improve your transformations. You can find more details in the [dbt Documentation](https://docs.getdbt.com/). It's very important that you understand the types of materializations and incremental models, as well as understanding the models, sources, metrics and everything else that dbt provides.
3. **Apply Data Quality into your pipeline**
dbt provides a simple test framework that is a good starting point, but there is a lot more you can do to ensure your data is correct. You can use Airflow to run manual data quality checks, by using [Sensors](https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/sensors.html) or operators that run custom queries. You can also use specialized tools such as [Great Expectations](https://greatexpectations.io/) to create more complex data quality checks.
4. **Monitoring and alerts**
Airflow's UI is a good start for simple monitoring, but as your pipelines scale it might be useful to have a more robust monitoring solution. You can use tools such as [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/) to create dashboards and alerts for your pipelines, but you can create [notifications using Airflow](https://airflow.apache.org/docs/apache-airflow/2.6.0/howto/notifications.html) or other tools such as [re_data](https://docs.getre.io/latest/docs/re_data/introduction/whatis_data/).
5. **Contribute with the community**
All tools mentioned here are open-source and have very active communities. You can contribute with them by creating issues, suggesting features, or even creating pull requests. You can also contribute with the Airbyte community by creating [connectors](https://docs.airbyte.io/connector-development) for new sources and destinations.
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
## This project
We've created two dbt models: example (which contains the default dbt example from jaffle-shop) and ecommerce, which uses data from the dataset extracted via airbyte using the Faker source.
This project is being orchestrated via Apache Airflow using the [Astronomer Cosmos](https://astronomer.github.io/astronomer-cosmos/) project. For more details on orchestrating dbt models with Airflow, you can check the `orchestration` folder in this quickstart.
The ecommerce dbt model was forked and updated from the [Ecommerce Analytics Bigquery Quickstart](https://github.com/airbytehq/quickstarts/tree/main/ecommerce_analytics_bigquery).
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ['models']
analysis-paths: ['analyses']
test-paths: ['tests']
seed-paths: ['seeds']
macro-paths: ['macros']
snapshot-paths: ['snapshots']
clean-targets: # directories to be removed by `dbt clean`
- 'target'
- 'dbt_packages'
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
ecommerce:
+materialized: view
staging:
+materialized: view
marts:
+materialized: view
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/macros/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/models/ecommerce/marts/product_popularity.sql
================================================
-- Mart: purchase count per product, joined onto the product catalog.
-- Counts rows in stg_purchases per product, then attaches make/model.
WITH base AS (
    SELECT
        product_id,
        COUNT(id) AS purchase_count
    FROM {{ ref('stg_purchases') }}
    GROUP BY 1
)

SELECT
    p.id,
    p.make,
    p.model,
    -- COALESCE so products with no purchases report 0 instead of NULL.
    -- The LEFT JOIN otherwise yields NULL here, which would fail the
    -- `not_null` test declared on purchase_count in marts/schema.yml.
    COALESCE(b.purchase_count, 0) AS purchase_count
FROM {{ ref('stg_products') }} p
LEFT JOIN base b ON p.id = b.product_id
ORDER BY purchase_count DESC
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/models/ecommerce/marts/purchase_patterns.sql
================================================
-- Mart: per-purchase funnel timing. time_to_purchase_seconds is the gap
-- between adding to cart and completing the purchase; it is NULL whenever
-- either timestamp is NULL (standard SQL NULL propagation).
SELECT
    user_id,
    product_id,
    purchased_at,
    added_to_cart_at,
    TIMESTAMP_DIFF(purchased_at, added_to_cart_at, SECOND) AS time_to_purchase_seconds,
    returned_at
FROM {{ ref('stg_purchases') }}
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/models/ecommerce/marts/schema.yml
================================================
version: 2
models:
- name: product_popularity
columns:
- name: id
tests:
- unique
- name: make
tests:
- not_null
- name: model
tests:
- not_null
- name: purchase_count
tests:
- not_null
- name: purchase_patterns
columns:
- name: user_id
tests:
- not_null
- name: product_id
tests:
- not_null
- name: purchased_at
- name: added_to_cart_at
tests:
- not_null
- name: time_to_purchase_seconds
- name: returned_at
- name: user_demographics
columns:
- name: gender
tests:
- not_null
- name: academic_degree
tests:
- not_null
- name: nationality
tests:
- not_null
- name: average_age
tests:
- not_null
- name: user_count
tests:
- not_null
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/models/ecommerce/marts/user_demographics.sql
================================================
-- Mart: user count and average age per (gender, academic_degree, nationality).
-- The base CTE narrows stg_users to just the columns this aggregation needs.
WITH base AS (
    SELECT
        id AS user_id,
        gender,
        academic_degree,
        nationality,
        age
    FROM {{ ref('stg_users') }}
)

SELECT
    gender,
    academic_degree,
    nationality,
    AVG(age) AS average_age,
    COUNT(user_id) AS user_count
FROM base
GROUP BY 1, 2, 3
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/models/ecommerce/sources/faker_sources.yml
================================================
version: 2
sources:
- name: faker
project: your_project_id # Update this field with your BigQuery project ID
dataset: raw_data
tables:
- name: users
description: 'Simulated user data from the Faker connector.'
columns:
- name: id
description: 'Unique identifier for the user.'
- name: address
- name: occupation
- name: gender
- name: academic_degree
- name: weight
- name: created_at
- name: language
- name: telephone
- name: title
- name: updated_at
- name: nationality
- name: blood_type
- name: name
- name: age
- name: email
- name: height
- name: _airbyte_raw_id
- name: _airbyte_extracted_at
- name: _airbyte_meta
- name: products
description: 'Simulated product data from the Faker connector.'
columns:
- name: id
description: 'Unique identifier for the product.'
- name: updated_at
- name: year
- name: price
- name: created_at
- name: model
- name: make
- name: _airbyte_raw_id
- name: _airbyte_extracted_at
- name: _airbyte_meta
- name: purchases
description: 'Simulated purchase data from the Faker connector.'
columns:
- name: id
description: 'Unique identifier for the purchase.'
- name: updated_at
- name: purchased_at
- name: user_id
- name: returned_at
- name: product_id
- name: created_at
- name: added_to_cart_at
- name: _airbyte_raw_id
- name: _airbyte_extracted_at
- name: _airbyte_meta
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/models/ecommerce/staging/schema.yml
================================================
version: 2
models:
- name: stg_users
columns:
- name: id
tests:
- unique
- name: gender
tests:
- not_null
- name: academic_degree
tests:
- not_null
- name: title
tests:
- not_null
- name: nationality
tests:
- not_null
- name: age
tests:
- not_null
- name: name
tests:
- not_null
- name: email
tests:
- not_null
- name: created_at
tests:
- not_null
- name: updated_at
tests:
- not_null
- name: _airbyte_extracted_at
tests:
- not_null
- name: stg_purchases
columns:
- name: id
tests:
- unique
- name: user_id
tests:
- not_null
- name: product_id
tests:
- not_null
- name: updated_at
tests:
- not_null
- name: purchased_at
- name: returned_at
- name: created_at
tests:
- not_null
- name: added_to_cart_at
- name: _airbyte_extracted_at
tests:
- not_null
- name: stg_products
columns:
- name: id
tests:
- unique
- name: year
tests:
- not_null
- name: price
tests:
- not_null
- name: model
tests:
- not_null
- name: make
tests:
- not_null
- name: created_at
tests:
- not_null
- name: updated_at
tests:
- not_null
- name: _airbyte_extracted_at
tests:
- not_null
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/models/ecommerce/staging/stg_products.sql
================================================
-- Staging model: passthrough of the `products` stream landed by Airbyte's
-- Faker source. Keeps the business columns plus _airbyte_extracted_at so
-- downstream marts can track data freshness.
select
    id,
    year,
    price,
    model,
    make,
    created_at,
    updated_at,
    _airbyte_extracted_at
from {{ source('faker', 'products') }}
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/models/ecommerce/staging/stg_purchases.sql
================================================
-- Staging model: passthrough of the `purchases` stream landed by Airbyte's
-- Faker source. purchased_at / returned_at / added_to_cart_at may be NULL
-- (see the tests declared in staging/schema.yml).
select
    id,
    user_id,
    product_id,
    updated_at,
    purchased_at,
    returned_at,
    created_at,
    added_to_cart_at,
    _airbyte_extracted_at
from {{ source('faker', 'purchases') }}
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/models/ecommerce/staging/stg_users.sql
================================================
-- Staging model: passthrough of the `users` stream landed by Airbyte's
-- Faker source. Keeps the business columns plus _airbyte_extracted_at.
select
    id,
    gender,
    academic_degree,
    title,
    nationality,
    age,
    name,
    email,
    created_at,
    updated_at,
    -- Fixed: removed the trailing comma that previously followed this
    -- column. BigQuery tolerates a trailing comma in a SELECT list, but it
    -- is invalid in most SQL dialects and inconsistent with the sibling
    -- staging models.
    _airbyte_extracted_at
from {{ source('faker', 'users') }}
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
dataset: transformed_data
job_execution_timeout_seconds: 300
job_retries: 1
keyfile: /opt/airflow/service_accounts/your_keyfile_path.json # Update this field with your file name, example: /opt/airflow/service_accounts/airflow-***116-83db69931a10.json
location: your_dataset_location # Update this field with your dataset location, example: US
method: service-account
priority: interactive
project: your_project_id # Update this field with your BigQuery project ID
threads: 1
type: bigquery
target: dev
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/seeds/raw_customers.csv
================================================
id,first_name,last_name
1,Michael,P.
2,Shawn,M.
3,Kathleen,P.
4,Jimmy,C.
5,Katherine,R.
6,Sarah,R.
7,Martin,M.
8,Frank,R.
9,Jennifer,F.
10,Henry,W.
11,Fred,S.
12,Amy,D.
13,Kathleen,M.
14,Steve,F.
15,Teresa,H.
16,Amanda,H.
17,Kimberly,R.
18,Johnny,K.
19,Virginia,F.
20,Anna,A.
21,Willie,H.
22,Sean,H.
23,Mildred,A.
24,David,G.
25,Victor,H.
26,Aaron,R.
27,Benjamin,B.
28,Lisa,W.
29,Benjamin,K.
30,Christina,W.
31,Jane,G.
32,Thomas,O.
33,Katherine,M.
34,Jennifer,S.
35,Sara,T.
36,Harold,O.
37,Shirley,J.
38,Dennis,J.
39,Louise,W.
40,Maria,A.
41,Gloria,C.
42,Diana,S.
43,Kelly,N.
44,Jane,R.
45,Scott,B.
46,Norma,C.
47,Marie,P.
48,Lillian,C.
49,Judy,N.
50,Billy,L.
51,Howard,R.
52,Laura,F.
53,Anne,B.
54,Rose,M.
55,Nicholas,R.
56,Joshua,K.
57,Paul,W.
58,Kathryn,K.
59,Adam,A.
60,Norma,W.
61,Timothy,R.
62,Elizabeth,P.
63,Edward,G.
64,David,C.
65,Brenda,W.
66,Adam,W.
67,Michael,H.
68,Jesse,E.
69,Janet,P.
70,Helen,F.
71,Gerald,C.
72,Kathryn,O.
73,Alan,B.
74,Harry,A.
75,Andrea,H.
76,Barbara,W.
77,Anne,W.
78,Harry,H.
79,Jack,R.
80,Phillip,H.
81,Shirley,H.
82,Arthur,D.
83,Virginia,R.
84,Christina,R.
85,Theresa,M.
86,Jason,C.
87,Phillip,B.
88,Adam,T.
89,Margaret,J.
90,Paul,P.
91,Todd,W.
92,Willie,O.
93,Frances,R.
94,Gregory,H.
95,Lisa,P.
96,Jacqueline,A.
97,Shirley,D.
98,Nicole,M.
99,Mary,G.
100,Jean,M.
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/seeds/raw_orders.csv
================================================
id,user_id,order_date,status
1,1,2018-01-01,returned
2,3,2018-01-02,completed
3,94,2018-01-04,completed
4,50,2018-01-05,completed
5,64,2018-01-05,completed
6,54,2018-01-07,completed
7,88,2018-01-09,completed
8,2,2018-01-11,returned
9,53,2018-01-12,completed
10,7,2018-01-14,completed
11,99,2018-01-14,completed
12,59,2018-01-15,completed
13,84,2018-01-17,completed
14,40,2018-01-17,returned
15,25,2018-01-17,completed
16,39,2018-01-18,completed
17,71,2018-01-18,completed
18,64,2018-01-20,returned
19,54,2018-01-22,completed
20,20,2018-01-23,completed
21,71,2018-01-23,completed
22,86,2018-01-24,completed
23,22,2018-01-26,return_pending
24,3,2018-01-27,completed
25,51,2018-01-28,completed
26,32,2018-01-28,completed
27,94,2018-01-29,completed
28,8,2018-01-29,completed
29,57,2018-01-31,completed
30,69,2018-02-02,completed
31,16,2018-02-02,completed
32,28,2018-02-04,completed
33,42,2018-02-04,completed
34,38,2018-02-06,completed
35,80,2018-02-08,completed
36,85,2018-02-10,completed
37,1,2018-02-10,completed
38,51,2018-02-10,completed
39,26,2018-02-11,completed
40,33,2018-02-13,completed
41,99,2018-02-14,completed
42,92,2018-02-16,completed
43,31,2018-02-17,completed
44,66,2018-02-17,completed
45,22,2018-02-17,completed
46,6,2018-02-19,completed
47,50,2018-02-20,completed
48,27,2018-02-21,completed
49,35,2018-02-21,completed
50,51,2018-02-23,completed
51,71,2018-02-24,completed
52,54,2018-02-25,return_pending
53,34,2018-02-26,completed
54,54,2018-02-26,completed
55,18,2018-02-27,completed
56,79,2018-02-28,completed
57,93,2018-03-01,completed
58,22,2018-03-01,completed
59,30,2018-03-02,completed
60,12,2018-03-03,completed
61,63,2018-03-03,completed
62,57,2018-03-05,completed
63,70,2018-03-06,completed
64,13,2018-03-07,completed
65,26,2018-03-08,completed
66,36,2018-03-10,completed
67,79,2018-03-11,completed
68,53,2018-03-11,completed
69,3,2018-03-11,completed
70,8,2018-03-12,completed
71,42,2018-03-12,shipped
72,30,2018-03-14,shipped
73,19,2018-03-16,completed
74,9,2018-03-17,shipped
75,69,2018-03-18,completed
76,25,2018-03-20,completed
77,35,2018-03-21,shipped
78,90,2018-03-23,shipped
79,52,2018-03-23,shipped
80,11,2018-03-23,shipped
81,76,2018-03-23,shipped
82,46,2018-03-24,shipped
83,54,2018-03-24,shipped
84,70,2018-03-26,placed
85,47,2018-03-26,shipped
86,68,2018-03-26,placed
87,46,2018-03-27,placed
88,91,2018-03-27,shipped
89,21,2018-03-28,placed
90,66,2018-03-30,shipped
91,47,2018-03-31,placed
92,84,2018-04-02,placed
93,66,2018-04-03,placed
94,63,2018-04-03,placed
95,27,2018-04-04,placed
96,90,2018-04-06,placed
97,89,2018-04-07,placed
98,41,2018-04-07,placed
99,85,2018-04-09,placed
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/seeds/raw_payments.csv
================================================
id,order_id,payment_method,amount
1,1,credit_card,1000
2,2,credit_card,2000
3,3,coupon,100
4,4,coupon,2500
5,5,bank_transfer,1700
6,6,credit_card,600
7,7,credit_card,1600
8,8,credit_card,2300
9,9,gift_card,2300
10,9,bank_transfer,0
11,10,bank_transfer,2600
12,11,credit_card,2700
13,12,credit_card,100
14,13,credit_card,500
15,13,bank_transfer,1400
16,14,bank_transfer,300
17,15,coupon,2200
18,16,credit_card,1000
19,17,bank_transfer,200
20,18,credit_card,500
21,18,credit_card,800
22,19,gift_card,600
23,20,bank_transfer,1500
24,21,credit_card,1200
25,22,bank_transfer,800
26,23,gift_card,2300
27,24,coupon,2600
28,25,bank_transfer,2000
29,25,credit_card,2200
30,25,coupon,1600
31,26,credit_card,3000
32,27,credit_card,2300
33,28,bank_transfer,1900
34,29,bank_transfer,1200
35,30,credit_card,1300
36,31,credit_card,1200
37,32,credit_card,300
38,33,credit_card,2200
39,34,bank_transfer,1500
40,35,credit_card,2900
41,36,bank_transfer,900
42,37,credit_card,2300
43,38,credit_card,1500
44,39,bank_transfer,800
45,40,credit_card,1400
46,41,credit_card,1700
47,42,coupon,1700
48,43,gift_card,1800
49,44,gift_card,1100
50,45,bank_transfer,500
51,46,bank_transfer,800
52,47,credit_card,2200
53,48,bank_transfer,300
54,49,credit_card,600
55,49,credit_card,900
56,50,credit_card,2600
57,51,credit_card,2900
58,51,credit_card,100
59,52,bank_transfer,1500
60,53,credit_card,300
61,54,credit_card,1800
62,54,bank_transfer,1100
63,55,credit_card,2900
64,56,credit_card,400
65,57,bank_transfer,200
66,58,coupon,1800
67,58,gift_card,600
68,59,gift_card,2800
69,60,credit_card,400
70,61,bank_transfer,1600
71,62,gift_card,1400
72,63,credit_card,2900
73,64,bank_transfer,2600
74,65,credit_card,0
75,66,credit_card,2800
76,67,bank_transfer,400
77,67,credit_card,1900
78,68,credit_card,1600
79,69,credit_card,1900
80,70,credit_card,2600
81,71,credit_card,500
82,72,credit_card,2900
83,73,bank_transfer,300
84,74,credit_card,3000
85,75,credit_card,1900
86,76,coupon,200
87,77,credit_card,0
88,77,bank_transfer,1900
89,78,bank_transfer,2600
90,79,credit_card,1800
91,79,credit_card,900
92,80,gift_card,300
93,81,coupon,200
94,82,credit_card,800
95,83,credit_card,100
96,84,bank_transfer,2500
97,85,bank_transfer,1700
98,86,coupon,2300
99,87,gift_card,3000
100,87,credit_card,2600
101,88,credit_card,2900
102,89,bank_transfer,2200
103,90,bank_transfer,200
104,91,credit_card,1900
105,92,bank_transfer,1500
106,92,coupon,200
107,93,gift_card,2600
108,94,coupon,700
109,95,coupon,2400
110,96,gift_card,1700
111,97,bank_transfer,1400
112,98,bank_transfer,1000
113,99,credit_card,2400
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_bigquery/dbt_project/tests/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_bigquery/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: airbyte_dbt_airflow_bigquery/infra/README.md
================================================
# Airbyte setup with terraform
This folder contains the Terraform code to set up a source, destination and connection in Airbyte using Terraform.
We're using the [airbyte official provider](https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs), and any details can be found in the documentation.
For this example we're using:
- [Airbyte Source Faker](https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs/resources/source_faker)
- [Airbyte Destination BigQuery](https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs/resources/destination_bigquery)
- [Airbyte Connection](https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs/resources/connection)
This is all optional, since part of the advantage of using Airbyte is setting up the sources and destinations via the UI. However, if you want to automate this process, you can use this terraform code as a starting point.
================================================
FILE: airbyte_dbt_airflow_bigquery/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Faker source: generates 1000 synthetic users/products/purchases records
// with a fixed seed (6) so runs are reproducible.
resource "airbyte_source_faker" "faker" {
  configuration = {
    always_updated    = false
    count             = 1000
    parallelism       = 9
    records_per_slice = 10
    seed              = 6
    source_type       = "faker"
  }
  name         = "Faker"
  workspace_id = var.workspace_id
}
// Destinations
// BigQuery destination: writes into var.dataset_id in the US location using
// standard inserts (no GCS staging bucket required). The service-account
// key is read from the local file at var.credentials_json_path.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.dataset_id
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    credentials_json = file(var.credentials_json_path)
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}
// Connections
// Connection wiring the Faker source to the BigQuery destination, syncing
// the three streams the dbt models consume (users, products, purchases).
resource "airbyte_connection" "faker_to_bigquery" {
  name           = "Faker to BigQuery"
  source_id      = airbyte_source_faker.faker.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [
      {
        name = "users"
      },
      {
        name = "products"
      },
      {
        name = "purchases"
      },
    ]
  }
}
================================================
FILE: airbyte_dbt_airflow_bigquery/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pins the Airbyte provider version used by this quickstart.
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.3"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below).
  // NOTE: these are the Airbyte OSS defaults for a local sandbox only — for any
  // shared deployment, supply credentials via variables/environment instead of
  // committing them here.
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: airbyte_dbt_airflow_bigquery/infra/airbyte/terraform.tfvars
================================================
workspace_id=""
dataset_id="sample_ecommerce"
project_id=""
credentials_json_path = ""
================================================
FILE: airbyte_dbt_airflow_bigquery/infra/airbyte/variables.tf
================================================
// Airbyte workspace to create the source/destination/connection in.
variable "workspace_id" {
  type = string
}

// BigQuery dataset that Airbyte will load the raw streams into.
variable "dataset_id" {
  type = string
}

// GCP project that owns the BigQuery dataset.
variable "project_id" {
  type = string
}

// Local path to the service-account JSON keyfile (read with file() in main.tf).
variable "credentials_json_path" {
  type = string
}
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/.gitignore
================================================
logs
__pycache__
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/Dockerfile
================================================
# Extends the official Airflow image with the extra Python packages listed
# in requirements.txt (Airbyte provider, dbt, astronomer-cosmos, ...).
FROM apache/airflow:2.7.2-python3.11
COPY requirements.txt /
RUN pip install --no-cache-dir -r /requirements.txt
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/README.md
================================================
# Airflow setup with Airbyte and DBT
This folder contains the code to set up Airflow with Airbyte and dbt.
## Setup
We're using the [Running Airflow in Docker](https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html) as a starting point.
We've downloaded the official `docker-compose.yaml` file provided by Airflow and adapted it to:
- Use some configurations from an .env file
- Add the Airbyte operator, dbt and astronomer-cosmos packages
- Mount our dbt project folder into the container image
- For running locally, we've set up the network to use the one deployed by the Airbyte container setup (from [Airbyte Local Deployment](https://docs.airbyte.com/deploying-airbyte/local-deployment))
- Assuming you're running Airbyte locally as described above, make sure the Airbyte containers are up before starting Airflow so its Docker network exists
## Features
- Providing dbt docs as a plugin from airflow, and making it available in the UI (and behind authentication)
- Example dag with the airbyte operator
- Example dag rendering dbt docs
- Example dag orchestrating specific dbt-models inside a dag with multiple tasks
- Example dag orchestrating specific dbt models as a dag
We're also using dataset aware schedules, and the airflow decorator to write the dag code.
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/airflow/config/dbt_config.py
================================================
from cosmos.config import ProjectConfig, ProfileConfig
from cosmos.profiles import GoogleCloudServiceAccountDictProfileMapping, GoogleCloudServiceAccountFileProfileMapping
# Cosmos configuration shared by the DAGs in dags/elt_dag.py.
# Points Cosmos at the dbt project mounted into the Airflow container.
project_config = ProjectConfig(
    dbt_project_path="/opt/airflow/dbt_project",
)

# Maps the Airflow connection "dbt_file_connection" (a Google Cloud
# connection holding a service-account keyfile) onto a dbt BigQuery profile.
# The profile_args mirror the values in dbt_project/profiles.yml.
google_config = GoogleCloudServiceAccountFileProfileMapping(
    conn_id="dbt_file_connection",
    profile_args={
        "dataset": "transformed_data",
        "location": "US",  # Update if you're using a different location for your dataset
        "threads": 1,
        "retries": 1,
        "priority": "interactive",
    }
)

# Profile handed to DbtDag / DbtDocsOperator: targets the "dev" output of
# the "dbt_project" profile defined above.
profile_config = ProfileConfig(
    profile_name="dbt_project",
    target_name="dev",
    profile_mapping=google_config
)
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/airflow/dags/elt_dag.py
================================================
from pendulum import datetime
from airflow.decorators import dag
from airflow.providers.airbyte.operators.airbyte import AirbyteTriggerSyncOperator
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
from cosmos import DbtDag
from cosmos.operators import DbtDocsOperator
from cosmos.config import RenderConfig
from dbt_config import project_config, profile_config # type:ignore
from dbt_upload_docs import upload_docs # type:ignore
# Define the ELT DAG
# Define the ELT DAG: daily Airbyte extract followed by the dbt transform.
@dag(
    dag_id="elt_dag",
    start_date=datetime(2023, 10, 1),
    schedule="@daily",
    tags=["airbyte", "dbt", "bigquery", "ecommerce"],
    catchup=False,
)
def extract_and_transform():
    """
    Runs the connection "Faker to BigQuery" on Airbyte and then triggers the dbt DAG.

    Flow: Airbyte sync -> trigger the Cosmos-built "dbt_ecommerce" DAG
    (defined below) -> regenerate and publish the dbt docs.
    """
    # Airbyte sync task. asynchronous=False makes the operator block until
    # the sync finishes, polling every `wait_seconds` seconds and failing
    # after `timeout` seconds.
    extract_data = AirbyteTriggerSyncOperator(
        task_id="trigger_airbyte_faker_to_bigquery",
        airbyte_conn_id="airbyte_connection",  # Airflow connection pointing at the Airbyte API
        connection_id="your_connection_id",  # Update with your Airbyte connection ID
        asynchronous=False,
        timeout=3600,
        wait_seconds=3
    )

    # Trigger for the dbt DAG; waits for that run to complete (checking
    # every poke_interval seconds) before this task succeeds.
    trigger_dbt_dag = TriggerDagRunOperator(
        task_id="trigger_dbt_dag",
        trigger_dag_id="dbt_ecommerce",
        wait_for_completion=True,
        poke_interval=30,
    )

    # Generates the dbt docs and passes the project dir to upload_docs()
    # (plugins/dbt_upload_docs.py), which publishes them to the Docs Plugin.
    render_dbt_docs = DbtDocsOperator(
        task_id="render_dbt_docs",
        profile_config=profile_config,
        project_dir="/opt/airflow/dbt_project",
        callback=upload_docs,
    )

    # Set the order of tasks
    extract_data >> trigger_dbt_dag >> render_dbt_docs

# Instantiate the ELT DAG
extract_and_transform_dag = extract_and_transform()

# Define the dbt DAG using DbtDag from the cosmos library; RenderConfig
# restricts it to the models under models/ecommerce.
dbt_cosmos_dag = DbtDag(
    dag_id="dbt_ecommerce",
    start_date=datetime(2023, 10, 1),
    tags=["dbt", "ecommerce"],
    catchup=False,
    project_config=project_config,
    profile_config=profile_config,
    render_config=RenderConfig(select=["path:models/ecommerce"]),
)

# Instantiate the dbt DAG
dbt_cosmos_dag
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/airflow/plugins/custom_docs_plugin.py
================================================
"""Plugins example"""
from __future__ import annotations
from flask import Blueprint
from flask_appbuilder import BaseView, expose
from airflow.plugins_manager import AirflowPlugin
from airflow.security import permissions
from airflow.www.auth import has_access
# Flask blueprint exposing this plugin's templates/ and static/ folders.
# static_url_path must match the script URL prefix written into the docs
# page by dbt_upload_docs.fix_file().
bp = Blueprint(
    "Docs Plugin",
    __name__,
    template_folder="templates",
    static_folder="static",
    static_url_path="/dbtdocspluginview",
)

class DbtDocsPluginView(BaseView):
    """Flask-AppBuilder view that renders the generated dbt docs page."""
    default_view = "index"

    @expose("/")
    @has_access(
        [
            # Any user who can read the website can see the docs; this keeps
            # the docs behind Airflow's normal authentication.
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
        ]
    )
    def index(self):
        """Render templates/dbt/index.html (placed there by dbt_upload_docs)."""
        return self.render_template("dbt/index.html", name="DBT")

class CustomDocsPlugin(AirflowPlugin):
    """Airflow plugin registering the blueprint and adding the docs view to
    the UI under the "Custom Docs" menu."""
    name = "Docs Plugin"
    flask_blueprints = [bp]
    appbuilder_views = [{
        "name": "dbt",
        "category": "Custom Docs",
        "view": DbtDocsPluginView()
    }]
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/airflow/plugins/dbt_upload_docs.py
================================================
import shutil, os, re
def fix_file():
    """Rewrite the dbt docs index page so its inline script is served as a
    static asset.

    dbt generates a self-contained ``index.html`` whose page logic lives in
    one large inline ``<script>`` tag. This function extracts that script
    body into ``plugins/static/script.js`` and replaces it with a
    ``<script src>`` reference served by the Docs Plugin blueprint under
    ``/dbtdocspluginview``, then writes the result to the template the
    plugin view renders.
    """
    with open('/opt/airflow/plugins/templates/dbt/dbt_index.html') as f:
        html_contents = f.read()

    # NOTE(review): the string literals in this function were garbled in the
    # original file (the HTML tags had been stripped, leaving empty/broken
    # regex strings and a syntax error). They are reconstructed below from
    # the surrounding logic — confirm the script-tag pattern against an
    # actual dbt target/index.html before relying on this.
    script_regex = r'<script type="module">(.*?)</script>'
    script_match = re.search(script_regex, html_contents, re.DOTALL)
    # Get the contents of the inline script tag.
    script_contents = script_match.group(1)

    # Write the script contents to a separate JavaScript file in the
    # plugin's static folder so Flask can serve it.
    if not os.path.exists('/opt/airflow/plugins/static'):
        os.makedirs('/opt/airflow/plugins/static')
    with open('/opt/airflow/plugins/static/script.js', 'w') as f:
        f.write(script_contents)

    # Remove the inline script body from the HTML contents.
    html_contents = html_contents.replace(script_contents, "")

    # Reference the static copy from <head> instead. The URL prefix matches
    # static_url_path in custom_docs_plugin.py.
    new_script_tag = '<script src="/dbtdocspluginview/script.js"></script>'
    head_regex = r'<head>(.*?)</head>'
    head_match = re.search(head_regex, html_contents, re.DOTALL)
    head_contents = head_match.group(1) + new_script_tag
    # Use a callable replacement so backslashes in the page contents are not
    # interpreted as regex group references by re.sub.
    html_contents = re.sub(
        head_regex,
        lambda _m: '<head>' + head_contents + '</head>',
        html_contents,
        flags=re.DOTALL,
    )

    # Write the modified HTML contents to the template rendered by the
    # Docs Plugin view.
    with open('/opt/airflow/plugins/templates/dbt/index.html', 'w') as f:
        f.write(html_contents)
def upload_docs(project_dir):
    """Publish dbt docs artifacts into the Airflow plugin's directories.

    Moves the files produced by ``dbt docs generate`` under
    ``{project_dir}/target`` into the plugin's template/static folders so
    the custom docs view can serve them, then rewrites the HTML via
    ``fix_file()``.

    To host the docs in external storage instead, replace the moves below;
    only these artifacts are needed:
      - f"{project_dir}/target/index.html"
      - f"{project_dir}/target/manifest.json"
      - f"{project_dir}/target/graph.gpickle"
      - f"{project_dir}/target/catalog.json"

    Args:
        project_dir: path to the dbt project whose target/ holds the docs.
    """
    # Destination directories may not exist on a fresh container; shutil.move
    # fails if the target directory is missing, so create them up front.
    os.makedirs("/opt/airflow/plugins/templates/dbt", exist_ok=True)
    os.makedirs("/opt/airflow/plugins/static", exist_ok=True)
    shutil.move(f"{project_dir}/target/index.html", "/opt/airflow/plugins/templates/dbt/dbt_index.html")
    shutil.move(f"{project_dir}/target/manifest.json", "/opt/airflow/plugins/static/manifest.json")
    shutil.move(f"{project_dir}/target/graph.gpickle", "/opt/airflow/plugins/static/graph.gpickle")
    shutil.move(f"{project_dir}/target/catalog.json", "/opt/airflow/plugins/static/catalog.json")
    fix_file()
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/airflow/plugins/static/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/airflow/plugins/templates/dbt/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/docker-compose.yaml
================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Basic Airflow cluster configuration for CeleryExecutor with Redis and PostgreSQL.
#
# WARNING: This configuration is for local development. Do not use it in a production deployment.
#
# This configuration supports basic configuration using environment variables or an .env file
# The following variables are supported:
#
# AIRFLOW_IMAGE_NAME - Docker image name used to run Airflow.
# Default: apache/airflow:2.7.2
# AIRFLOW_UID - User ID in Airflow containers
# Default: 50000
# AIRFLOW_PROJ_DIR - Base path to which all the files will be volumed.
# Default: .
# Those configurations are useful mostly in case of standalone testing/running Airflow in test/try-out mode
#
# _AIRFLOW_WWW_USER_USERNAME - Username for the administrator account (if requested).
# Default: airflow
# _AIRFLOW_WWW_USER_PASSWORD - Password for the administrator account (if requested).
# Default: airflow
# _PIP_ADDITIONAL_REQUIREMENTS - Additional PIP requirements to add when starting all containers.
# Use this option ONLY for quick checks. Installing requirements at container
# startup is done EVERY TIME the service is started.
# A better way is to build a custom image or extend the official image
# as described in https://airflow.apache.org/docs/docker-stack/build.html.
# Default: ''
#
# Feel free to modify this file to suit your needs.
---
version: '3.8'
x-airflow-common: &airflow-common
# In order to add custom dependencies or upgrade provider packages you can use your extended image.
# Comment the image line, place your Dockerfile in the directory where you placed the docker-compose.yaml
# and uncomment the "build" line below, then run `docker-compose build` to build the images.
# image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:2.7.2}
build: .
env_file:
- ./.env
environment: &airflow-common-env
AIRFLOW__CORE__EXECUTOR: CeleryExecutor
AIRFLOW__DATABASE__SQL_ALCHEMY_CONN: postgresql+psycopg2://${POSTGRES_USER:-airflow}:${POSTGRES_PASSWORD:-airflow}@postgres/airflow
# For backward compatibility, with Airflow <2.3
AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://${POSTGRES_USER:-airflow}:${POSTGRES_PASSWORD:-airflow}@postgres/airflow
AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://${POSTGRES_USER:-airflow}:${POSTGRES_PASSWORD:-airflow}@postgres/airflow
AIRFLOW__CELERY__BROKER_URL: redis://:@redis:6379/0
AIRFLOW__CORE__FERNET_KEY: ''
AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true'
AIRFLOW__CORE__LOAD_EXAMPLES: ${LOAD_EXAMPLES:-true}
AIRFLOW__API__AUTH_BACKENDS: 'airflow.api.auth.backend.basic_auth,airflow.api.auth.backend.session'
AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK: 'true'
AIRFLOW__CORE__LAZY_LOAD_PLUGINS: 'false'
# WARNING: Use the _PIP_ADDITIONAL_REQUIREMENTS option ONLY for quick checks.
# For other purposes (development, test, and especially production usage) build/extend the Airflow image.
_PIP_ADDITIONAL_REQUIREMENTS: ${_PIP_ADDITIONAL_REQUIREMENTS:-}
volumes:
- ${AIRFLOW_PROJ_DIR:-.}/dags:/opt/airflow/dags
- ${AIRFLOW_PROJ_DIR:-.}/logs:/opt/airflow/logs
- ${AIRFLOW_PROJ_DIR:-.}/config:/opt/airflow/config
- ${AIRFLOW_PROJ_DIR:-.}/plugins:/opt/airflow/plugins
- ${DBT_PROJ_DIR:-.}:/opt/airflow/dbt_project
- ${GCP_SERVICE_ACCOUNT_PATH:-.}:/opt/airflow/service_accounts
user: '${AIRFLOW_UID:-50000}:0'
depends_on: &airflow-common-depends-on
redis:
condition: service_healthy
postgres:
condition: service_healthy
networks:
- airbyte_airbyte_public
services:
postgres:
image: postgres:15
environment:
POSTGRES_USER: ${POSTGRES_USER:-airflow}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-airflow}
POSTGRES_DB: airflow
volumes:
- postgres-db-volume:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready', '-U', 'airflow']
interval: 10s
retries: 5
start_period: 5s
restart: always
networks:
- airbyte_airbyte_public
redis:
image: redis:latest
expose:
- 6379
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
interval: 10s
timeout: 30s
retries: 50
start_period: 30s
restart: always
networks:
- airbyte_airbyte_public
airflow-webserver:
<<: *airflow-common
command: webserver
ports:
- '8080:8080'
healthcheck:
test: ['CMD', 'curl', '--fail', 'http://localhost:8080/health']
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
airflow-scheduler:
<<: *airflow-common
command: scheduler
healthcheck:
test: ['CMD', 'curl', '--fail', 'http://localhost:8974/health']
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
airflow-worker:
<<: *airflow-common
command: celery worker
healthcheck:
# yamllint disable rule:line-length
test:
- 'CMD-SHELL'
- 'celery --app airflow.providers.celery.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}" || celery --app airflow.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}"'
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
environment:
<<: *airflow-common-env
# Required to handle warm shutdown of the celery workers properly
# See https://airflow.apache.org/docs/docker-stack/entrypoint.html#signal-propagation
DUMB_INIT_SETSID: '0'
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
airflow-triggerer:
<<: *airflow-common
command: triggerer
healthcheck:
test:
[
'CMD-SHELL',
'airflow jobs check --job-type TriggererJob --hostname "$${HOSTNAME}"',
]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
airflow-init:
<<: *airflow-common
entrypoint: /bin/bash
# yamllint disable rule:line-length
command:
- -c
- |
function ver() {
printf "%04d%04d%04d%04d" $${1//./ }
}
airflow_version=$$(AIRFLOW__LOGGING__LOGGING_LEVEL=INFO && gosu airflow airflow version)
airflow_version_comparable=$$(ver $${airflow_version})
min_airflow_version=2.2.0
min_airflow_version_comparable=$$(ver $${min_airflow_version})
if (( airflow_version_comparable < min_airflow_version_comparable )); then
echo
echo -e "\033[1;31mERROR!!!: Too old Airflow version $${airflow_version}!\e[0m"
echo "The minimum Airflow version supported: $${min_airflow_version}. Only use this or higher!"
echo
exit 1
fi
if [[ -z "${AIRFLOW_UID}" ]]; then
echo
echo -e "\033[1;33mWARNING!!!: AIRFLOW_UID not set!\e[0m"
echo "If you are on Linux, you SHOULD follow the instructions below to set "
echo "AIRFLOW_UID environment variable, otherwise files will be owned by root."
echo "For other operating systems you can get rid of the warning with manually created .env file:"
echo " See: https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#setting-the-right-airflow-user"
echo
fi
one_meg=1048576
mem_available=$$(($$(getconf _PHYS_PAGES) * $$(getconf PAGE_SIZE) / one_meg))
cpus_available=$$(grep -cE 'cpu[0-9]+' /proc/stat)
disk_available=$$(df / | tail -1 | awk '{print $$4}')
warning_resources="false"
if (( mem_available < 4000 )) ; then
echo
echo -e "\033[1;33mWARNING!!!: Not enough memory available for Docker.\e[0m"
echo "At least 4GB of memory required. You have $$(numfmt --to iec $$((mem_available * one_meg)))"
echo
warning_resources="true"
fi
if (( cpus_available < 2 )); then
echo
echo -e "\033[1;33mWARNING!!!: Not enough CPUS available for Docker.\e[0m"
echo "At least 2 CPUs recommended. You have $${cpus_available}"
echo
warning_resources="true"
fi
if (( disk_available < one_meg * 10 )); then
echo
echo -e "\033[1;33mWARNING!!!: Not enough Disk space available for Docker.\e[0m"
echo "At least 10 GBs recommended. You have $$(numfmt --to iec $$((disk_available * 1024 )))"
echo
warning_resources="true"
fi
if [[ $${warning_resources} == "true" ]]; then
echo
echo -e "\033[1;33mWARNING!!!: You have not enough resources to run Airflow (see above)!\e[0m"
echo "Please follow the instructions to increase amount of resources available:"
echo " https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#before-you-begin"
echo
fi
mkdir -p /sources/logs /sources/dags /sources/plugins
chown -R "${AIRFLOW_UID}:0" /sources/{logs,dags,plugins}
exec /entrypoint airflow version
# yamllint enable rule:line-length
environment:
<<: *airflow-common-env
_AIRFLOW_DB_MIGRATE: 'true'
_AIRFLOW_WWW_USER_CREATE: 'true'
_AIRFLOW_WWW_USER_USERNAME: ${_AIRFLOW_WWW_USER_USERNAME:-airflow}
_AIRFLOW_WWW_USER_PASSWORD: ${_AIRFLOW_WWW_USER_PASSWORD:-airflow}
_PIP_ADDITIONAL_REQUIREMENTS: ''
user: '0:0'
volumes:
- ${AIRFLOW_PROJ_DIR:-.}:/sources
airflow-cli:
<<: *airflow-common
profiles:
- debug
environment:
<<: *airflow-common-env
CONNECTION_CHECK_MAX_COUNT: '0'
# Workaround for entrypoint issue. See: https://github.com/apache/airflow/issues/16252
command:
- bash
- -c
- airflow
# You can enable flower by adding "--profile flower" option e.g. docker-compose --profile flower up
# or by explicitly targeted on the command line e.g. docker-compose up flower.
# See: https://docs.docker.com/compose/profiles/
flower:
<<: *airflow-common
command: celery flower
profiles:
- flower
ports:
- '5555:5555'
healthcheck:
test: ['CMD', 'curl', '--fail', 'http://localhost:5555/']
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
networks:
- airbyte_airbyte_public
volumes:
postgres-db-volume:
networks:
airbyte_airbyte_public:
external: true
================================================
FILE: airbyte_dbt_airflow_bigquery/orchestration/requirements.txt
================================================
dbt-core~=1.6.0
astronomer-cosmos~=1.1.0
astronomer-cosmos[dbt-bigquery]~=1.1.0
apache-airflow-providers-google~=10.9.0
apache-airflow-providers-airbyte~=3.3.2
================================================
FILE: airbyte_dbt_airflow_bigquery/setup.py
================================================
"""Packaging metadata for the Airbyte + dbt + Airflow + BigQuery quickstart."""
from setuptools import find_packages, setup

# Runtime dependencies for the orchestration and dbt tooling.
REQUIRED = [
    "dbt-bigquery",
    "astronomer-cosmos[dbt-bigquery]",
    "apache-airflow-providers-google",
    "apache-airflow-providers-airbyte",
    "apache-airflow",
]

# Optional extras, installed with `pip install -e ".[dev]"`.
EXTRAS = {"dev": ["pytest"]}

setup(
    name="airbyte-dbt-airflow-bigquery",
    packages=find_packages(),
    install_requires=REQUIRED,
    extras_require=EXTRAS,
)
================================================
FILE: airbyte_dbt_airflow_snowflake/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: airbyte_dbt_airflow_snowflake/README.md
================================================
# Airbyte-dbt-Airflow-Snowflake Integration
Welcome to the "Airbyte-dbt-Airflow-Snowflake Integration" repository! This repo provides a quickstart template for building a full data stack using Airbyte, Airflow, dbt, and Snowflake. Easily extract data from Postgres and load it into Snowflake using Airbyte, and apply necessary transformations using dbt, all orchestrated seamlessly with Airflow. While this template doesn't delve into specific data or transformations, its goal is to showcase the synergy of these tools.
This quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Airbyte-dbt-Airflow-Snowflake Integration](#airbyte-dbt-airflow-snowflake-integration)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Airflow Pipeline DAG](#airflow-pipeline-dag)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [3. Setting Up the dbt Project](#3-setting-up-the-dbt-project)
- [4. Orchestrating with Airflow](#4-orchestrating-with-airflow)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Airflow Pipeline DAG

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add airbyte_dbt_airflow_snowflake
```
2. **Navigate to the directory**:
```bash
cd airbyte_dbt_airflow_snowflake
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres and Snowflake connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 3. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, Snowflake. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd ../../dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your Snowflake connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your Snowflake instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to Snowflake.
## 4. Orchestrating with Airflow
[Airflow](https://airflow.apache.org/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Airflow to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Airflow orchestration configurations:
```bash
cd ../../orchestration
```
2. **Set Environment Variables**:
The Airflow pipeline requires certain environment variables to run successfully. The variables will be set using the `.env` file. Populate the `.env` file with the contents of the `.env.example` file and modify to suit your use case.
Particularly, modify the `AIRFLOW_AIRBYTE_CONN` value which is the connection URI that Airflow uses to connect to the Airbyte API. See [here](https://airflow.apache.org/docs/apache-airflow/2.0.2/howto/connection.html#connection-uri-format) for more details.
Also modify the `AIRBYTE_CONN_ID` value which is the id of the connection you have set up in Airbyte.
3. **Build and Run Airflow Locally**:
Build our Airflow image with the necessary packages and services
```bash
docker compose build
```
And then run it
```bash
docker compose up
```
4. **Access Airflow in Your Browser**:
When it's done, you can access the Airflow UI at `http://127.0.0.1:8080`. The default username and password are both `airflow`, unless you changed it on the `.env` file.
Here, you should see the DAG for the Extract, Load and Transformation pipeline. To get an overview of the DAG, click on the DAG's name and select the Graph view. This will give you a clear picture of the process lineage and visualize how the operation flows from extraction to transformation.
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Create dbt Sources for Airbyte Data**:
Your raw data extracted via Airbyte can be represented as sources in dbt. Start by [creating new dbt sources](https://docs.getdbt.com/docs/build/sources) to represent this data, allowing for structured transformations down the line.
2. **Add Your dbt Transformations**:
With your dbt sources in place, you can now build upon them. Add your custom SQL transformations in dbt, ensuring that you treat the sources as an upstream dependency. This ensures that your transformations work on the most up-to-date raw data.
3. **Execute the Pipeline in Airflow**:
Navigate to the Airflow UI and trigger the DAG. This triggers the entire pipeline, encompassing the extraction via Airbyte, transformations via dbt, and any other subsequent steps. Modify the schedule as well to suit your use case.
4. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or enhance your transformation logic – the floor is yours. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
example:
+materialized: view
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/macros/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/models/example/my_first_dbt_model.sql
================================================
/*
Welcome to your first dbt model!
Did you know that you can also configure models directly within SQL files?
This will override configurations stated in dbt_project.yml
Try changing "table" to "view" below
*/

{{ config(materialized='table') }}

-- `source()` resolves against models/sources.yml (source "snowflake",
-- table "sample_table"); update that file to point at your raw data.
with source_data as (
    select * from {{ source('snowflake', 'sample_table') }}
)

select *
from source_data

/*
Uncomment the line below to remove records with null `id` values
*/

-- where id is not null
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/models/example/my_second_dbt_model.sql
================================================
-- Use the `ref` function to select from other models; it also records the
-- dependency in dbt's graph so my_first_dbt_model is built first.
select *
from {{ ref('my_first_dbt_model') }}
where id = 1
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/models/example/schema.yml
================================================
version: 2
models:
- name: my_first_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
- name: my_second_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/models/sources.yml
================================================
version: 2
sources:
- name: snowflake
tables:
- name: sample_table
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
type: snowflake
account: "{{ env_var('DBT_SNOWFLAKE_ACCOUNT_ID', '') }}"
# User/password auth
user: username
password: "{{ env_var('DBT_SNOWFLAKE_PASSWORD', '') }}"
role: user_role
database: database_name
warehouse: warehouse_name
schema: dbt_schema
threads: 1
client_session_keep_alive: False
query_tag: anything
# optional
connect_retries: 0 # default 0
connect_timeout: 10 # default: 10
retry_on_database_errors: False # default: false
retry_all: False # default: false
reuse_connections: False # default: false (available v1.4+)
target: dev
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_snowflake/dbt_project/tests/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_snowflake/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: airbyte_dbt_airflow_snowflake/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Postgres source. Replace every "...my_*..." placeholder with real
// connection details (variables.tf can hold them as Terraform variables).
resource "airbyte_source_postgres" "postgres" {
  configuration = {
    database    = "...my_database..."
    host        = "...my_host..."
    username    = "...my_username..."
    password    = "...my_password..."
    port        = 5432
    source_type = "postgres"
    schemas = [
      "...my_schema..."
    ]
    // "allow": SSL is used if available but not required — tighten for
    // production databases.
    ssl_mode = {
      allow = {}
    }
    tunnel_method = {
      no_tunnel = {}
    }
    // Sync changes using a user-defined cursor column rather than CDC.
    replication_method = {
      scan_changes_with_user_defined_cursor = {}
    }
  }
  name         = "Postgres"
  workspace_id = var.workspace_id
}
// Destinations
resource "airbyte_destination_snowflake" "snowflake" {
configuration = {
credentials = {
destination_snowflake_authorization_method_key_pair_authentication = {
auth_type = "Key Pair Authentication"
private_key = "...my_private_key..."
private_key_password = "...my_private_key_password..."
}
}
database = "AIRBYTE_DATABASE"
destination_type = "snowflake"
host = "accountname.us-east-2.aws.snowflakecomputing.com"
jdbc_url_params = "...my_jdbc_url_params..."
raw_data_schema = "...my_raw_data_schema..."
role = "AIRBYTE_ROLE"
schema = "AIRBYTE_SCHEMA"
username = "AIRBYTE_USER"
warehouse = "AIRBYTE_WAREHOUSE"
}
name = "Snowflake"
workspace_id = var.workspace_id
}
// Connections
resource "airbyte_connection" "postgres_to_snowflake" {
name = "Postgres to Snowflake"
source_id = airbyte_source_postgres.postgres.source_id
destination_id = airbyte_destination_snowflake.snowflake.destination_id
configurations = {
streams = [
{
name = "...my_table_name_1..."
},
{
name = "...my_table_name_2..."
},
]
}
}
================================================
FILE: airbyte_dbt_airflow_snowflake/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}
// NOTE(review): the credentials below are the Airbyte OSS local defaults and
// are committed in plain text; override them (e.g. via variables or env)
// before pointing this at any non-local Airbyte deployment.
provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: airbyte_dbt_airflow_snowflake/infra/airbyte/variables.tf
================================================
// Airbyte workspace the sources, destination, and connection are created in.
variable "workspace_id" {
  type        = string
  description = "UUID of the Airbyte workspace (visible in the Airbyte UI URL) that all resources in this module are created in."
}
================================================
FILE: airbyte_dbt_airflow_snowflake/orchestration/.gitignore
================================================
logs
__pycache__
================================================
FILE: airbyte_dbt_airflow_snowflake/orchestration/Dockerfile
================================================
# Extend the official Airflow image with the extra Python dependencies
# (dbt + the Airbyte provider) pinned in requirements.txt.
FROM apache/airflow:2.7.2-python3.11
COPY requirements.txt /
RUN pip install --no-cache-dir -r /requirements.txt
================================================
FILE: airbyte_dbt_airflow_snowflake/orchestration/README.md
================================================
# Airflow setup with Airbyte and DBT
This folder contains the code to set up Airflow with Airbyte and dbt.
## Setup
We're using the [Running Airflow in Docker](https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html) guide as a starting point.
We've downloaded the official `docker-compose.yaml` file provided by Airflow and adapted it to:
- Use some configurations from an .env file
- Add the Airbyte operator
- Mount our dbt project folder into the container image
- For running locally, we've set up the network to use the one deployed by the Airbyte container setup (from [Airbyte Local Deployment](https://docs.airbyte.com/deploying-airbyte/local-deployment))
================================================
FILE: airbyte_dbt_airflow_snowflake/orchestration/airflow/dags/my_elt_dag.py
================================================
"""ELT pipeline DAG: Airbyte sync followed by dbt transformations.

Flow: gate on Airbyte API health, trigger the Airbyte connection sync
asynchronously, wait for the sync job to finish (dbt prechecks run in
parallel with the wait), then execute ``dbt run`` on the synced data.
"""
import os
from datetime import timedelta

import pendulum
from airflow import DAG
from airflow.operators.bash import BashOperator
from airflow.operators.empty import EmptyOperator
from airflow.sensors.python import PythonSensor
from airflow.providers.airbyte.operators.airbyte import AirbyteTriggerSyncOperator
from airflow.providers.airbyte.sensors.airbyte import AirbyteJobSensor
from airflow.providers.airbyte.hooks.airbyte import AirbyteHook

# The name of the Airflow connection to get connection information for Airbyte.
AIRFLOW_AIRBYTE_CONN_ID = os.getenv("AIRFLOW_AIRBYTE_CONN")
# The Airbyte ConnectionId UUID between a source and destination.
AIRBYTE_CONNECTION_ID = os.getenv("AIRBYTE_CONN_ID")
# Path where docker-compose mounts the dbt project inside the container.
DBT_DIR = "/opt/airflow/dbt_project"

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': pendulum.today('UTC').add(days=-1),
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
}


def check_airbyte_health():
    """Return True when the Airbyte API answers the hook's connection test.

    Used as a PythonSensor callable, so a False return means "poke again
    later" rather than a task failure.
    """
    airbyte_hook = AirbyteHook(airbyte_conn_id=AIRFLOW_AIRBYTE_CONN_ID)
    is_healthy, message = airbyte_hook.test_connection()
    print(message)
    return is_healthy


with DAG(
    dag_id='ELT_DAG',
    default_args=default_args,
    schedule='@daily',
) as dag:
    start_pipeline_task = EmptyOperator(task_id="start_pipeline")
    end_pipeline_task = EmptyOperator(task_id="end_pipeline")

    # Block the pipeline until the Airbyte server is reachable.
    airbyte_precheck_task = PythonSensor(
        task_id="check_airbyte_health",
        poke_interval=10,
        timeout=3600,
        mode="poke",
        python_callable=check_airbyte_health,
    )

    # Kick off the sync without blocking; the job id is passed via XCom
    # (`.output`) to the sensor below.
    trigger_airbyte_sync_task = AirbyteTriggerSyncOperator(
        task_id='airbyte_trigger_sync',
        airbyte_conn_id=AIRFLOW_AIRBYTE_CONN_ID,
        connection_id=AIRBYTE_CONNECTION_ID,
        asynchronous=True,
    )

    wait_for_sync_completion_task = AirbyteJobSensor(
        task_id='airbyte_check_sync',
        airbyte_conn_id=AIRFLOW_AIRBYTE_CONN_ID,
        airbyte_job_id=trigger_airbyte_sync_task.output,
    )

    # dbt sanity checks need only the project and profile, not the synced
    # data, so they run in parallel with the sync wait.
    run_dbt_check_task = BashOperator(
        task_id='run_dbt_precheck',
        bash_command='pwd && dbt debug && dbt list',
        cwd=DBT_DIR,
    )

    run_dbt_model_task = BashOperator(
        task_id='run_dbt_model',
        bash_command='dbt run',
        cwd=DBT_DIR,
    )

    # `dbt run` starts only after BOTH the sync completes and the precheck passes.
    start_pipeline_task >> airbyte_precheck_task >> trigger_airbyte_sync_task \
        >> [wait_for_sync_completion_task, run_dbt_check_task] \
        >> run_dbt_model_task >> end_pipeline_task
================================================
FILE: airbyte_dbt_airflow_snowflake/orchestration/airflow/plugins/static/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_snowflake/orchestration/airflow/plugins/templates/dbt/.gitkeep
================================================
================================================
FILE: airbyte_dbt_airflow_snowflake/orchestration/docker-compose.yaml
================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Basic Airflow cluster configuration for CeleryExecutor with Redis and PostgreSQL.
#
# WARNING: This configuration is for local development. Do not use it in a production deployment.
#
# This configuration supports basic configuration using environment variables or an .env file
# The following variables are supported:
#
# AIRFLOW_IMAGE_NAME - Docker image name used to run Airflow.
# Default: apache/airflow:2.7.2
# AIRFLOW_UID - User ID in Airflow containers
# Default: 50000
# AIRFLOW_PROJ_DIR - Base path to which all the files will be volumed.
# Default: .
# Those configurations are useful mostly in case of standalone testing/running Airflow in test/try-out mode
#
# _AIRFLOW_WWW_USER_USERNAME - Username for the administrator account (if requested).
# Default: airflow
# _AIRFLOW_WWW_USER_PASSWORD - Password for the administrator account (if requested).
# Default: airflow
# _PIP_ADDITIONAL_REQUIREMENTS - Additional PIP requirements to add when starting all containers.
# Use this option ONLY for quick checks. Installing requirements at container
# startup is done EVERY TIME the service is started.
# A better way is to build a custom image or extend the official image
# as described in https://airflow.apache.org/docs/docker-stack/build.html.
# Default: ''
#
# Feel free to modify this file to suit your needs.
---
version: '3.8'
x-airflow-common: &airflow-common
# In order to add custom dependencies or upgrade provider packages you can use your extended image.
# Comment the image line, place your Dockerfile in the directory where you placed the docker-compose.yaml
# and uncomment the "build" line below, Then run `docker-compose build` to build the images.
# image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:2.7.2}
build: .
env_file:
- ./.env
environment: &airflow-common-env
AIRFLOW__CORE__EXECUTOR: CeleryExecutor
AIRFLOW__DATABASE__SQL_ALCHEMY_CONN: postgresql+psycopg2://${POSTGRES_USER:-airflow}:${POSTGRES_PASSWORD:-airflow}@postgres/airflow
# For backward compatibility, with Airflow <2.3
AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://${POSTGRES_USER:-airflow}:${POSTGRES_PASSWORD:-airflow}@postgres/airflow
AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://${POSTGRES_USER:-airflow}:${POSTGRES_PASSWORD:-airflow}@postgres/airflow
AIRFLOW__CELERY__BROKER_URL: redis://:@redis:6379/0
AIRFLOW__CORE__FERNET_KEY: ''
AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true'
AIRFLOW__CORE__LOAD_EXAMPLES: ${LOAD_EXAMPLES:-true}
AIRFLOW__API__AUTH_BACKENDS: 'airflow.api.auth.backend.basic_auth,airflow.api.auth.backend.session'
AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK: 'true'
AIRFLOW__CORE__LAZY_LOAD_PLUGINS: 'false'
# WARNING: Use the _PIP_ADDITIONAL_REQUIREMENTS option ONLY for quick checks
# for other purpose (development, test and especially production usage) build/extend Airflow image.
_PIP_ADDITIONAL_REQUIREMENTS: ${_PIP_ADDITIONAL_REQUIREMENTS:-}
volumes:
- ${AIRFLOW_PROJ_DIR:-.}/dags:/opt/airflow/dags
- ${AIRFLOW_PROJ_DIR:-.}/logs:/opt/airflow/logs
- ${AIRFLOW_PROJ_DIR:-.}/config:/opt/airflow/config
- ${AIRFLOW_PROJ_DIR:-.}/plugins:/opt/airflow/plugins
- ${DBT_PROJ_DIR:-.}:/opt/airflow/dbt_project
user: '${AIRFLOW_UID:-50000}:0'
depends_on: &airflow-common-depends-on
redis:
condition: service_healthy
postgres:
condition: service_healthy
networks:
- airbyte_airbyte_public
services:
postgres:
image: postgres:15
environment:
POSTGRES_USER: ${POSTGRES_USER:-airflow}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-airflow}
POSTGRES_DB: airflow
volumes:
- postgres-db-volume:/var/lib/postgresql/data
healthcheck:
test: ['CMD', 'pg_isready', '-U', 'airflow']
interval: 10s
retries: 5
start_period: 5s
restart: always
networks:
- airbyte_airbyte_public
redis:
image: redis:latest
expose:
- 6379
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
interval: 10s
timeout: 30s
retries: 50
start_period: 30s
restart: always
networks:
- airbyte_airbyte_public
airflow-webserver:
<<: *airflow-common
command: webserver
ports:
- '8080:8080'
healthcheck:
test: ['CMD', 'curl', '--fail', 'http://localhost:8080/health']
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
airflow-scheduler:
<<: *airflow-common
command: scheduler
healthcheck:
test: ['CMD', 'curl', '--fail', 'http://localhost:8974/health']
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
airflow-worker:
<<: *airflow-common
command: celery worker
healthcheck:
# yamllint disable rule:line-length
test:
- 'CMD-SHELL'
- 'celery --app airflow.providers.celery.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}" || celery --app airflow.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}"'
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
environment:
<<: *airflow-common-env
# Required to handle warm shutdown of the celery workers properly
# See https://airflow.apache.org/docs/docker-stack/entrypoint.html#signal-propagation
DUMB_INIT_SETSID: '0'
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
airflow-triggerer:
<<: *airflow-common
command: triggerer
healthcheck:
test:
[
'CMD-SHELL',
'airflow jobs check --job-type TriggererJob --hostname "$${HOSTNAME}"',
]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
airflow-init:
<<: *airflow-common
entrypoint: /bin/bash
# yamllint disable rule:line-length
command:
- -c
- |
function ver() {
printf "%04d%04d%04d%04d" $${1//./ }
}
airflow_version=$$(AIRFLOW__LOGGING__LOGGING_LEVEL=INFO && gosu airflow airflow version)
airflow_version_comparable=$$(ver $${airflow_version})
min_airflow_version=2.2.0
min_airflow_version_comparable=$$(ver $${min_airflow_version})
if (( airflow_version_comparable < min_airflow_version_comparable )); then
echo
echo -e "\033[1;31mERROR!!!: Too old Airflow version $${airflow_version}!\e[0m"
echo "The minimum Airflow version supported: $${min_airflow_version}. Only use this or higher!"
echo
exit 1
fi
if [[ -z "${AIRFLOW_UID}" ]]; then
echo
echo -e "\033[1;33mWARNING!!!: AIRFLOW_UID not set!\e[0m"
echo "If you are on Linux, you SHOULD follow the instructions below to set "
echo "AIRFLOW_UID environment variable, otherwise files will be owned by root."
echo "For other operating systems you can get rid of the warning with manually created .env file:"
echo " See: https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#setting-the-right-airflow-user"
echo
fi
one_meg=1048576
mem_available=$$(($$(getconf _PHYS_PAGES) * $$(getconf PAGE_SIZE) / one_meg))
cpus_available=$$(grep -cE 'cpu[0-9]+' /proc/stat)
disk_available=$$(df / | tail -1 | awk '{print $$4}')
warning_resources="false"
if (( mem_available < 4000 )) ; then
echo
echo -e "\033[1;33mWARNING!!!: Not enough memory available for Docker.\e[0m"
echo "At least 4GB of memory required. You have $$(numfmt --to iec $$((mem_available * one_meg)))"
echo
warning_resources="true"
fi
if (( cpus_available < 2 )); then
echo
echo -e "\033[1;33mWARNING!!!: Not enough CPUS available for Docker.\e[0m"
echo "At least 2 CPUs recommended. You have $${cpus_available}"
echo
warning_resources="true"
fi
if (( disk_available < one_meg * 10 )); then
echo
echo -e "\033[1;33mWARNING!!!: Not enough Disk space available for Docker.\e[0m"
echo "At least 10 GBs recommended. You have $$(numfmt --to iec $$((disk_available * 1024 )))"
echo
warning_resources="true"
fi
if [[ $${warning_resources} == "true" ]]; then
echo
echo -e "\033[1;33mWARNING!!!: You have not enough resources to run Airflow (see above)!\e[0m"
echo "Please follow the instructions to increase amount of resources available:"
echo " https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#before-you-begin"
echo
fi
mkdir -p /sources/logs /sources/dags /sources/plugins
chown -R "${AIRFLOW_UID}:0" /sources/{logs,dags,plugins}
exec /entrypoint airflow version
# yamllint enable rule:line-length
environment:
<<: *airflow-common-env
_AIRFLOW_DB_MIGRATE: 'true'
_AIRFLOW_WWW_USER_CREATE: 'true'
_AIRFLOW_WWW_USER_USERNAME: ${_AIRFLOW_WWW_USER_USERNAME:-airflow}
_AIRFLOW_WWW_USER_PASSWORD: ${_AIRFLOW_WWW_USER_PASSWORD:-airflow}
_PIP_ADDITIONAL_REQUIREMENTS: ''
user: '0:0'
volumes:
- ${AIRFLOW_PROJ_DIR:-.}:/sources
airflow-cli:
<<: *airflow-common
profiles:
- debug
environment:
<<: *airflow-common-env
CONNECTION_CHECK_MAX_COUNT: '0'
# Workaround for entrypoint issue. See: https://github.com/apache/airflow/issues/16252
command:
- bash
- -c
- airflow
# You can enable flower by adding "--profile flower" option e.g. docker-compose --profile flower up
# or by explicitly targeted on the command line e.g. docker-compose up flower.
# See: https://docs.docker.com/compose/profiles/
flower:
<<: *airflow-common
command: celery flower
profiles:
- flower
ports:
- '5555:5555'
healthcheck:
test: ['CMD', 'curl', '--fail', 'http://localhost:5555/']
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
restart: always
depends_on:
<<: *airflow-common-depends-on
airflow-init:
condition: service_completed_successfully
networks:
- airbyte_airbyte_public
volumes:
postgres-db-volume:
networks:
airbyte_airbyte_public:
external: true
================================================
FILE: airbyte_dbt_airflow_snowflake/orchestration/requirements.txt
================================================
# Extra Python dependencies baked into the extended Airflow image (see Dockerfile).
dbt-core~=1.6.0
dbt-snowflake
# Airbyte provider: supplies AirbyteTriggerSyncOperator, AirbyteJobSensor, AirbyteHook.
apache-airflow-providers-airbyte~=3.3.2
================================================
FILE: airbyte_dbt_airflow_snowflake/setup.py
================================================
"""Packaging config for the Airbyte + dbt + Airflow + Snowflake quickstart.

Install the development environment with: ``pip install -e ".[dev]"``.
"""
from setuptools import find_packages, setup

setup(
    name="airbyte-dbt-airflow-snowflake",
    packages=find_packages(),
    install_requires=[
        "dbt-snowflake",
        # The [airbyte] extra already depends on apache-airflow itself, so a
        # separate bare "apache-airflow" entry would be redundant.
        "apache-airflow[airbyte]",
    ],
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: airbyte_dbt_dagster/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: airbyte_dbt_dagster/README.md
================================================
# Airbyte-dbt-Dagster Integration
Welcome to the "Airbyte-dbt-Dagster Integration" repository! This repo provides a quickstart template for building a full data stack using Airbyte, Dagster, dbt, and BigQuery. Easily extract data from Postgres, load it into BigQuery, and apply necessary transformations using dbt, all orchestrated seamlessly with Dagster. While this template doesn't delve into specific data or transformations, its goal is to showcase the synergy of these tools.
This quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [Setting Up the dbt Project](#3-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#4-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add airbyte_dbt_dagster
```
2. **Navigate to the directory**:
```bash
cd airbyte_dbt_dagster
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres and BigQuery connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 3. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
## 4. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and extend this project to fit your specific data needs:
1. **Create dbt Sources for Airbyte Data**:
Your raw data extracted via Airbyte can be represented as sources in dbt. Start by [creating new dbt sources](https://docs.getdbt.com/docs/build/sources) to represent this data, allowing for structured transformations down the line.
2. **Add Your dbt Transformations**:
With your dbt sources in place, you can now build upon them. Add your custom SQL transformations in dbt, ensuring that you treat the sources as an upstream dependency. This ensures that your transformations work on the most up-to-date raw data.
3. **Execute the Pipeline in Dagster**:
Navigate to the Dagster UI and click on "Materialize all". This triggers the entire pipeline, encompassing the extraction via Airbyte, transformations via dbt, and any other subsequent steps.
4. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or enhance your transformation logic – the floor is yours. With the foundation set, the sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: airbyte_dbt_dagster/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: airbyte_dbt_dagster/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: airbyte_dbt_dagster/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
example:
+materialized: view
================================================
FILE: airbyte_dbt_dagster/dbt_project/macros/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster/dbt_project/models/example/my_first_dbt_model.sql
================================================
/*
Welcome to your first dbt model!
Did you know that you can also configure models directly within SQL files?
This will override configurations stated in dbt_project.yml
Try changing "table" to "view" below
*/
{{ config(materialized='table') }}
with source_data as (
select 1 as id
union all
-- NOTE: this null row makes the not_null test in schema.yml fail on purpose
-- until the filter at the bottom of this file is uncommented.
select null as id
)
select *
from source_data
/*
Uncomment the line below to remove records with null `id` values
*/
-- where id is not null
================================================
FILE: airbyte_dbt_dagster/dbt_project/models/example/my_second_dbt_model.sql
================================================
-- Starter model demonstrating model-to-model dependencies: `ref` makes dbt
-- build my_first_dbt_model first and resolves its relation name here.
-- Use the `ref` function to select from other models
select *
from {{ ref('my_first_dbt_model') }}
where id = 1
================================================
FILE: airbyte_dbt_dagster/dbt_project/models/example/schema.yml
================================================
version: 2
models:
- name: my_first_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
- name: my_second_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
================================================
FILE: airbyte_dbt_dagster/dbt_project/profiles.yml
================================================
# dbt connection profile for BigQuery.
# The top-level key `dbt_project` must match the `profile:` entry in dbt_project.yml.
dbt_project:
  outputs:
    dev:
      dataset: my_dataset                 # BigQuery dataset dbt builds models into
      job_execution_timeout_seconds: 300
      job_retries: 1
      # Service-account key file path, read from the environment so the
      # credential never lands in version control.
      keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
      location: my_dataset_location       # dataset location, e.g. US or EU
      method: service-account
      priority: interactive
      project: my_project_id              # GCP project ID
      threads: 1
      type: bigquery
  target: dev                             # default output used by dbt commands
================================================
FILE: airbyte_dbt_dagster/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster/dbt_project/tests/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: airbyte_dbt_dagster/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:a6g5uWP/pt1/popVNlKwnTssWNfdYY4KVFPMisN/yvU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: airbyte_dbt_dagster/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Postgres source connector. Replace the "...my_*..." placeholders with real
// connection details before running `terraform apply`.
resource "airbyte_source_postgres" "postgres" {
  configuration = {
    database    = "...my_database..."
    host        = "...my_host..."
    username    = "...my_username..."
    password    = "...my_password..."
    port        = 5432
    source_type = "postgres"
    schemas = [
      "...my_schema..."
    ]
    // SSL mode "allow": the connection does not require SSL.
    ssl_mode = {
      allow = {}
    }
    // Connect directly, without an SSH tunnel.
    tunnel_method = {
      no_tunnel = {}
    }
    // NOTE(review): replication_method is left as an empty object here, while
    // the 0.3.4-based template in this repo specifies an explicit method —
    // confirm the provider accepts an empty block at version 0.3.3.
    replication_method = {}
  }
  name         = "Postgres"
  workspace_id = var.workspace_id
}
// Destinations
// BigQuery destination connector.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = "...my_dataset_id..."
    dataset_location = "...my_dataset_location..."
    destination_type = "bigquery"
    project_id       = "...my_project_id..."
    // NOTE(review): the placeholder text suggests a file path, but confirm in
    // the provider docs whether this field expects the service-account key
    // JSON contents rather than a path.
    credentials_json = "...my_credentials_json_file_path..."
    // "Standard" loading inserts rows directly; no GCS staging bucket needed.
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}
// Connections
// Connection wiring the Postgres source to the BigQuery destination.
resource "airbyte_connection" "postgres_to_bigquery" {
  name           = "Postgres to BigQuery"
  source_id      = airbyte_source_postgres.postgres.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    // One entry per stream (table) to replicate.
    streams = [
      {
        name = "...my_table_name_1..."
      },
      {
        name = "...my_table_name_2..."
      },
    ]
  }
}
================================================
FILE: airbyte_dbt_dagster/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.3"
    }
  }
}

// Provider credentials below are the stock defaults for a local OSS
// deployment behind the airbyte-proxy.
provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = "password"

  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: airbyte_dbt_dagster/infra/airbyte/variables.tf
================================================
// Airbyte workspace that all sources, destinations and connections are
// created in. The ID appears in the Airbyte UI URL: /workspaces/<id>.
variable "workspace_id" {
  type        = string
  description = "ID of the Airbyte workspace in which to create the source, destination and connection."
}
================================================
FILE: airbyte_dbt_dagster/orchestration/orchestration/__init__.py
================================================
================================================
FILE: airbyte_dbt_dagster/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path
@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize every model in the dbt project via ``dbt build``.

    Streams Dagster events (materializations, test results) as the CLI
    invocation progresses.
    """
    build_invocation = dbt.cli(["build"], context=context)
    yield from build_invocation.stream()
# Resource pointing at a locally running Airbyte deployment. The basic-auth
# password is read from the environment so it never lands in version control.
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    username="airbyte",
    password=os.environ.get("AIRBYTE_PASSWORD"),
)

# Mirror every Airbyte connection in that instance as Dagster assets.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance)
================================================
FILE: airbyte_dbt_dagster/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource
# Absolute path to the dbt project that lives beside this package:
# constants.py -> orchestration/orchestration -> orchestration -> <quickstart>/dbt_project.
dbt_project_dir = Path(__file__).resolve().parents[2] / "dbt_project"
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    parse_run = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = parse_run.target_path / "manifest.json"
else:
    dbt_manifest_path = dbt_project_dir / "target" / "manifest.json"
================================================
FILE: airbyte_dbt_dagster/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource
from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules
# Wire the dbt and Airbyte assets, the (currently empty) schedules, and the
# dbt CLI resource into a single Dagster code location.
_resources = {
    "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
}

defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources=_resources,
)
================================================
FILE: airbyte_dbt_dagster/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
# No schedules are enabled by default; uncomment the example to materialize
# the dbt assets once a day at midnight.
schedules = [
    # build_schedule_from_dbt_selection(
    #     [dbt_project_dbt_assets],
    #     job_name="materialize_dbt_models",
    #     cron_schedule="0 0 * * *",
    #     dbt_select="fqn:*",
    # ),
]
================================================
FILE: airbyte_dbt_dagster/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: airbyte_dbt_dagster/orchestration/setup.py
================================================
from setuptools import find_packages, setup
# Runtime dependencies for the Dagster orchestration package.
INSTALL_REQUIRES = [
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dbt-core>=1.4.0",
    "dbt-bigquery",
]

setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    # `pip install -e ".[dev]"` additionally pulls in the local Dagster UI.
    extras_require={"dev": ["dagster-webserver"]},
)
================================================
FILE: airbyte_dbt_dagster/setup.py
================================================
from setuptools import find_packages, setup
# Packaging metadata for the Airbyte + dbt + Dagster (BigQuery) quickstart.
setup(
    name="airbyte-dbt-dagster",
    packages=find_packages(),
    install_requires=[
        "dbt-bigquery",
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dagster-airbyte",
    ],
    # `pip install -e ".[dev]"` also installs the local UI and test runner.
    extras_require={
        "dev": [
            "dagit",
            "pytest",
        ],
    },
)
================================================
FILE: airbyte_dbt_dagster_snowflake/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: airbyte_dbt_dagster_snowflake/README.md
================================================
# Airbyte-dbt-Dagster-Snowflake Integration
Welcome to the "Airbyte-dbt-Dagster-Snowflake Integration" repository! This repo provides a quickstart template for building a full data stack using Airbyte, Dagster, dbt, and Snowflake. Easily extract data from Postgres and load it into Snowflake using Airbyte, and apply necessary transformations using dbt, all orchestrated seamlessly with Dagster. While this template doesn't delve into specific data or transformations, its goal is to showcase the synergy of these tools.
This quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Airbyte-dbt-Dagster-Snowflake Integration](#airbyte-dbt-dagster-snowflake-integration)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Pipeline DAG](#pipeline-dag)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [3. Setting Up the dbt Project](#3-setting-up-the-dbt-project)
- [4. Orchestrating with Dagster](#4-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Pipeline DAG

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add airbyte_dbt_dagster_snowflake
```
2. **Navigate to the directory**:
```bash
cd airbyte_dbt_dagster_snowflake
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres and Snowflake connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 3. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, Snowflake. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd ../../dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your Snowflake connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your Snowflake instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to Snowflake.
## 4. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd ../../orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Create dbt Sources for Airbyte Data**:
Your raw data extracted via Airbyte can be represented as sources in dbt. Start by [creating new dbt sources](https://docs.getdbt.com/docs/build/sources) to represent this data, allowing for structured transformations down the line.
2. **Add Your dbt Transformations**:
With your dbt sources in place, you can now build upon them. Add your custom SQL transformations in dbt, ensuring that you treat the sources as an upstream dependency. This ensures that your transformations work on the most up-to-date raw data.
3. **Execute the Pipeline in Dagster**:
Navigate to the Dagster UI and click on "Materialize all". This triggers the entire pipeline, encompassing the extraction via Airbyte, transformations via dbt, and any other subsequent steps.
4. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or enhance your transformation logic – the floor is yours. With the foundation set, the sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
example:
+materialized: view
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/macros/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/models/example/my_first_dbt_model.sql
================================================
/*
    Welcome to your first dbt model!
    Did you know that you can also configure models directly within SQL files?
    This will override configurations stated in dbt_project.yml
    Try changing "table" to "view" below
*/

-- Materialize as a table, overriding the `view` default configured for
-- the example/ folder in dbt_project.yml.
{{ config(materialized='table') }}

-- Read the raw table loaded by Airbyte, declared as the `snowflake`
-- dbt source in models/sources.yml.
with source_data as (
    select * from {{ source('snowflake', 'sample_table') }}
)

select *
from source_data

/*
    Uncomment the line below to remove records with null `id` values
*/

-- where id is not null
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/models/example/my_second_dbt_model.sql
================================================
-- Use the `ref` function to select from other models; dbt infers the
-- dependency on my_first_dbt_model from it and builds that model first.
select *
from {{ ref('my_first_dbt_model') }}
where id = 1
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/models/example/schema.yml
================================================
version: 2
models:
- name: my_first_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
- name: my_second_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/models/sources.yml
================================================
version: 2
sources:
- name: snowflake
tables:
- name: sample_table
meta:
dagster:
asset_key: ["sample_table"] # This metadata specifies the corresponding Dagster asset for this dbt source.
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
type: snowflake
account: "{{ env_var('DBT_SNOWFLAKE_ACCOUNT_ID', '') }}"
# User/password auth
user: username
password: "{{ env_var('DBT_SNOWFLAKE_PASSWORD', '') }}"
role: user_role
database: database_name
warehouse: warehouse_name
schema: dbt_schema
threads: 1
client_session_keep_alive: False
query_tag: anything
# optional
connect_retries: 0 # default 0
connect_timeout: 10 # default: 10
retry_on_database_errors: False # default: false
retry_all: False # default: false
reuse_connections: False # default: false (available v1.4+)
target: dev
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster_snowflake/dbt_project/tests/.gitkeep
================================================
================================================
FILE: airbyte_dbt_dagster_snowflake/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: airbyte_dbt_dagster_snowflake/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Postgres source connector. Replace the "...my_*..." placeholders with real
// connection details before running `terraform apply`.
resource "airbyte_source_postgres" "postgres" {
  configuration = {
    database    = "...my_database..."
    host        = "...my_host..."
    username    = "...my_username..."
    password    = "...my_password..."
    port        = 5432
    source_type = "postgres"
    schemas = [
      "...my_schema..."
    ]
    // SSL mode "allow": the connection does not require SSL.
    ssl_mode = {
      source_postgres_ssl_modes_allow = {
        mode = "allow"
      }
    }
    // Connect directly, without an SSH tunnel.
    tunnel_method = {
      source_postgres_ssh_tunnel_method_no_tunnel = {
        tunnel_method = "NO_TUNNEL"
      }
    }
    // "Standard" update method: re-read rows via a user-defined cursor
    // column rather than CDC.
    replication_method = {
      source_postgres_update_method_scan_changes_with_user_defined_cursor = {
        method = "Standard"
      }
    }
  }
  name         = "Postgres"
  workspace_id = var.workspace_id
}
// Destinations
// Snowflake destination connector using key-pair authentication.
resource "airbyte_destination_snowflake" "snowflake" {
  configuration = {
    credentials = {
      destination_snowflake_authorization_method_key_pair_authentication = {
        auth_type            = "Key Pair Authentication"
        private_key          = "...my_private_key..."
        private_key_password = "...my_private_key_password..."
      }
    }
    database         = "AIRBYTE_DATABASE"
    destination_type = "snowflake"
    // Full account host, e.g. <account>.<region>.<cloud>.snowflakecomputing.com
    host             = "accountname.us-east-2.aws.snowflakecomputing.com"
    jdbc_url_params  = "...my_jdbc_url_params..."
    raw_data_schema  = "...my_raw_data_schema..."
    role             = "AIRBYTE_ROLE"
    schema           = "AIRBYTE_SCHEMA"
    username         = "AIRBYTE_USER"
    warehouse        = "AIRBYTE_WAREHOUSE"
  }
  name         = "Snowflake"
  workspace_id = var.workspace_id
}
// Connections
// Connection wiring the Postgres source to the Snowflake destination.
resource "airbyte_connection" "postgres_to_snowflake" {
  name           = "Postgres to Snowflake"
  source_id      = airbyte_source_postgres.postgres.source_id
  destination_id = airbyte_destination_snowflake.snowflake.destination_id
  configurations = {
    // One entry per stream (table) to replicate.
    streams = [
      {
        name = "...my_table_name_1..."
      },
      {
        name = "...my_table_name_2..."
      },
    ]
  }
}
================================================
FILE: airbyte_dbt_dagster_snowflake/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

// Provider credentials below are the stock defaults for a local OSS
// deployment behind the airbyte-proxy.
provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = "password"

  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: airbyte_dbt_dagster_snowflake/infra/airbyte/variables.tf
================================================
// Airbyte workspace that all sources, destinations and connections are
// created in. The ID appears in the Airbyte UI URL: /workspaces/<id>.
variable "workspace_id" {
  type        = string
  description = "ID of the Airbyte workspace in which to create the source, destination and connection."
}
================================================
FILE: airbyte_dbt_dagster_snowflake/orchestration/orchestration/__init__.py
================================================
================================================
FILE: airbyte_dbt_dagster_snowflake/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path
@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize every model of the dbt project as Dagster assets.

    Runs ``dbt build`` through the Dagster-managed CLI resource and streams
    each event back so Dagster can record per-model progress.
    """
    build_invocation = dbt.cli(["build"], context=context)
    yield from build_invocation.stream()
# Resource pointing Dagster at the locally running Airbyte server (OSS deployment).
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password:
    username="airbyte",
    # Read from the environment; export AIRBYTE_PASSWORD before launching Dagster.
    password=os.getenv("AIRBYTE_PASSWORD")
)

# Auto-discover all connections in the Airbyte workspace and expose their
# synced tables as Dagster assets.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance)
================================================
FILE: airbyte_dbt_dagster_snowflake/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource
# Absolute path to the dbt project that sits next to this orchestration package.
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
# CLI resource used below only to (re)generate the manifest at import time.
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # NOTE(review): `manifest={}` tells the invocation not to require an existing
    # manifest while parsing — confirm against the dagster-dbt version pinned here.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    # Assumes `dbt parse`/`dbt build` has already been run so target/manifest.json exists.
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: airbyte_dbt_dagster_snowflake/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource
from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules
# Dagster code-location entry point: bundles the dbt and Airbyte assets,
# the (currently empty) schedules, and the dbt CLI resource.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        # Injected into dbt_project_dbt_assets as its `dbt` argument.
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: airbyte_dbt_dagster_snowflake/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
# No schedules enabled by default; uncomment the builder below to materialize
# the dbt assets daily at midnight.
schedules = [
    # build_schedule_from_dbt_selection(
    #     [dbt_project_dbt_assets],
    #     job_name="materialize_dbt_models",
    #     cron_schedule="0 0 * * *",
    #     dbt_select="fqn:*",
    # ),
]
================================================
FILE: airbyte_dbt_dagster_snowflake/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: airbyte_dbt_dagster_snowflake/orchestration/setup.py
================================================
from setuptools import find_packages, setup
# Runtime dependencies of the Dagster orchestration package.
_INSTALL_REQUIRES = [
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dbt-core>=1.4.0",
    "dbt-snowflake",
]

# Extras needed only for local development (the Dagster web UI).
_DEV_REQUIRES = ["dagster-webserver"]

setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=_INSTALL_REQUIRES,
    extras_require={"dev": _DEV_REQUIRES},
)
================================================
FILE: airbyte_dbt_dagster_snowflake/setup.py
================================================
from setuptools import find_packages, setup
setup(
    # Top-level helper package for the Airbyte + dbt + Dagster + Snowflake quickstart.
    name="airbyte-dbt-dagster-snowflake",
    packages=find_packages(),
    install_requires=[
        "dbt-snowflake",
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dagster-airbyte",
    ],
    # `dev` extras add the Dagit UI and pytest for local development.
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: airbyte_dbt_prefect_bigquery/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: airbyte_dbt_prefect_bigquery/README.md
================================================
# Airbyte-dbt-Prefect-BigQuery Integration
Welcome to the Prefect, Airbyte, dbt (PAD) Stack with BigQuery quickstart! This repo contains the code to show how to utilize Airbyte and dbt for data extraction and transformation, and utilize Prefect to orchestrate the data workflows, providing an end-to-end ELT pipeline. With this setup, you can pull fake e-commerce data, put it into BigQuery, and play around with it using dbt and Prefect.
## Infrastructure Layout

## Pipeline DAG

## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors](#3-setting-up-airbyte-connectors)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Prefect](#5-orchestrating-with-prefect)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte locally. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform (Optional)**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli). This is an optional step because you can also create and manage Airbyte resources via the UI. Both ways will be described below.
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add airbyte_dbt_prefect_bigquery
```
2. **Navigate to the directory**:
```bash
cd airbyte_dbt_prefect_bigquery
```
At this point you can view the code in your preferred IDE.
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting up BigQuery
If you don't have a Google Cloud account, you can sign up and get free credits, which are more than enough to implement this project.
1. **Create a Google Cloud project**:
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
2. **Create BigQuery datasets**:
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
3. **Create a Service Account and Assign Roles**:
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account.
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
4. **Generate a JSON key for the Service Account**:
- Make a JSON key to let the service account sign in.
**How to generate a JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
## 3. Setting Up Airbyte Connectors
To set up your Airbyte connectors, you can choose to do it via Terraform, or the UI. Choose one of the two following options.
### 3.1. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations via Terraform, facilitating data synchronization between various platforms. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs:
- Provide credentials for your BigQuery connection in the `main.tf` file.
- `dataset_id`: The name of the BigQuery dataset where Airbyte will load data. In this case, enter “raw_data”.
- `project_id`: Your BigQuery project ID.
- `credentials_json`: The contents of the service account JSON file. You should input a string, so you need to convert the JSON content to string beforehand.
- `workspace_id`: Your Airbyte workspace ID, which can be found in the webapp url. For example, in this url: http://localhost:8000/workspaces/910ab70f-0a67-4d25-a983-999e99e1e395/ the workspace id would be `910ab70f-0a67-4d25-a983-999e99e1e395`.
- Alternatively, you can utilize the `variables.tf` file to manage these credentials:
- You’ll be prompted to enter the credentials when you execute `terraform plan` and `terraform apply`. If going for this option, just move to the next step. If you don’t want to use variables, remove them from the file.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go 🎉.
### 3.2. Setting Up Airbyte Connectors Using the UI
Start by launching the Airbyte UI by going to http://localhost:8000/ in your browser. Then:
1. **Create a source**:
- Go to the Sources tab and click on `+ New source`.
- Search for “faker” using the search bar and select `Sample Data (Faker)`.
- Adjust the Count and optional fields as needed for your use case. You can also leave as is.
- Click on `Set up source`.
2. **Create a destination**:
- Go to the Destinations tab and click on `+ New destination`.
- Search for “bigquery” using the search bar and select `BigQuery`.
- Enter the connection details as needed.
- For simplicity, you can use `Standard Inserts` as the loading method.
- In the `Service Account Key JSON` field, enter the contents of the JSON file. Yes, the full JSON.
- Click on `Set up destination`.
3. **Create a connection**:
- Go to the Connections tab and click on `+ New connection`.
- Select the source and destination you just created.
- Enter the connection details as needed.
- Click on `Set up connection`.
That’s it! Your connection is set up and ready to go! 🎉
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Move to the directory containing the dbt configuration:
```bash
cd ../../dbt_project
```
2. **Update Connection Details**:
- You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details. Specifically, you need to update the Service Account JSON file path, the dataset location and your BigQuery project ID.
- Provide your BigQuery project ID in the `database` field of the `/models/ecommerce/sources/faker_sources.yml` file.
If you want to avoid hardcoding credentials in the `profiles.yml` file, you can leverage environment variables. Here's an example: `keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"`
3. **Test the Connection (Optional)**:
You can test the connection to your BigQuery instance using the following command. Just take into account that you would need to provide the local path to your service account key file instead.
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery 🎉.
## 5. Orchestrating with Prefect
[Prefect](https://prefect.io/) is an orchestration workflow tool that makes it easy to build, run, and monitor data workflows by writing Python code. In this section, we'll walk you through creating a Prefect flow to orchestrate both Airbyte extract and load operations, and dbt transformations with Python:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Prefect orchestration configurations:
```bash
cd ../orchestration
```
2. **Update the Airbyte Connection ID**:
Open the `my_elt_flow.py` Python script and update the `connection_id` key in the `airbyte_connection` object.
To find your connection id go to the [Airbyte UI](http://localhost:8000/), then select the connection you want to use from the "Connections" tab and copy the ID from the URL (you'll find it after `/connections/`, i.e., `e3646db8-6612-4142-8edf-1e51932b6836`).
3. **Set Environment Variables**:
Prefect requires certain environment variables to be set to interact with other tools, like Airbyte. Set the following variables:
```bash
export AIRBYTE_PASSWORD=password
```
Additionally, set the following environment variable to avoid unnecessary notifications from Prefect:
```bash
export PREFECT_API_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED=false
```
4. **Connect to Prefect's API**:
Open a new terminal window. Start a local Prefect server instance in your virtual environment:
```bash
prefect server start
```
5. **Deploy the Flow**:
Go back to your previous terminal and execute the following python script:
```bash
python my_elt_flow.py
```
When you run the flow script, Prefect will automatically create a flow deployment that you can interact with via the UI and API. The script will stay running so that it can listen for scheduled or triggered runs of this flow; once a run is found, it will be executed within a subprocess.
6. **Access Prefect UI in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:4200
```
You can now begin interacting with your newly created deployment! 🎉
## Next Steps
Congratulations on deploying and running this quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Dive into the datasets in BigQuery, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Expand Your Data Sources**
- Add more data sources to Airbyte. Explore different types of sources available, and see how they can enrich your existing datasets and broaden your analytical capabilities.
### 4. **Enhance Data Quality and Testing**
- Implement data quality tests in dbt to ensure the reliability and accuracy of your transformations. Use dbt's testing features to validate your data and catch issues early on.
### 5. **Scale Your Setup**
- Consider scaling your setup to handle more data, more sources, and more transformations. Optimize your configurations and resources to ensure smooth and efficient processing of larger datasets.
### 6. **Contribute to the Community**
- Share your learnings, optimizations, and new configurations with the community. Contribute to the respective tool’s communities and help others learn and grow.
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# This project's models live under models/staging and models/marts (there is
# no example/ directory), so the configs are scoped to those folders. Settings
# can be overridden in individual model files using the `{{ config(...) }}` macro.
models:
  dbt_project:
    # `+materialized` applies to every model in the named subdirectory.
    # `view` is also dbt's default; it is stated explicitly for clarity.
    staging:
      +materialized: view
    marts:
      +materialized: view
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/macros/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/models/marts/product_popularity.sql
================================================
-- Ranks products by how many times they have been purchased.
WITH base AS (
    SELECT
        product_id,
        COUNT(id) AS purchase_count
    FROM {{ ref('stg_purchases') }}
    GROUP BY 1
)

SELECT
    p.id,
    p.make,
    p.model,
    -- NULL for products that were never purchased (LEFT JOIN miss).
    b.purchase_count
FROM {{ ref('stg_products') }} p
LEFT JOIN base b ON p.id = b.product_id
ORDER BY b.purchase_count DESC
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/models/marts/purchase_patterns.sql
================================================
-- One row per purchase with the cart-to-purchase latency in seconds.
SELECT
    user_id,
    product_id,
    purchased_at,
    added_to_cart_at,
    -- NULL whenever either timestamp is NULL (standard SQL NULL propagation).
    TIMESTAMP_DIFF(purchased_at, added_to_cart_at, SECOND) AS time_to_purchase_seconds,
    returned_at
FROM {{ ref('stg_purchases') }}
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/models/marts/user_demographics.sql
================================================
-- Aggregates users into demographic cohorts (gender x degree x nationality).
WITH base AS (
    SELECT
        id AS user_id,
        gender,
        academic_degree,
        nationality,
        age
    FROM {{ ref('stg_users') }}
)

SELECT
    gender,
    academic_degree,
    nationality,
    AVG(age) AS average_age,
    COUNT(user_id) AS user_count
FROM base
GROUP BY 1, 2, 3
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/models/sources/faker_sources.yml
================================================
version: 2
sources:
- name: faker
database: your_bigquery_project_id # Update with your BigQuery project ID
schema: raw_data
tables:
- name: users
description: "Simulated user data from the Faker connector."
columns:
- name: id
description: "Unique identifier for the user."
- name: address
- name: occupation
- name: gender
- name: academic_degree
- name: weight
- name: created_at
- name: language
- name: telephone
- name: title
- name: updated_at
- name: nationality
- name: blood_type
- name: name
- name: age
- name: email
- name: height
- name: _airbyte_extracted_at
- name: _airbyte_raw_id
- name: _airbyte_meta
- name: products
description: "Simulated product data from the Faker connector."
columns:
- name: id
description: "Unique identifier for the product."
- name: updated_at
- name: year
- name: price
- name: created_at
- name: model
- name: make
- name: _airbyte_extracted_at
- name: _airbyte_raw_id
- name: _airbyte_meta
- name: purchases
description: "Simulated purchase data from the Faker connector."
columns:
- name: id
description: "Unique identifier for the purchase."
- name: updated_at
- name: purchased_at
- name: user_id
- name: returned_at
- name: product_id
- name: created_at
- name: added_to_cart_at
- name: _airbyte_extracted_at
- name: _airbyte_raw_id
- name: _airbyte_meta
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/models/staging/stg_products.sql
================================================
-- Staging view over the raw `products` table loaded by Airbyte.
select
    id,
    year,
    price,
    model,
    make,
    created_at,
    updated_at,
    -- Trailing comma before FROM is valid BigQuery SQL.
    _airbyte_extracted_at,
from {{ source('faker', 'products') }}
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/models/staging/stg_purchases.sql
================================================
-- Staging view over the raw `purchases` table loaded by Airbyte.
select
    id,
    user_id,
    product_id,
    updated_at,
    purchased_at,
    returned_at,
    created_at,
    added_to_cart_at,
    -- Trailing comma before FROM is valid BigQuery SQL.
    _airbyte_extracted_at,
from {{ source('faker', 'purchases') }}
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/models/staging/stg_users.sql
================================================
-- Staging view over the raw `users` table loaded by Airbyte.
select
    id,
    gender,
    academic_degree,
    title,
    nationality,
    age,
    name,
    email,
    created_at,
    updated_at,
    -- Trailing comma before FROM is valid BigQuery SQL.
    _airbyte_extracted_at,
from {{ source('faker', 'users') }}
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/profiles.yml
================================================
dbt_project:
  outputs:
    dev:
      # BigQuery dataset where dbt writes the transformed models.
      dataset: transformed_data
      job_execution_timeout_seconds: 300
      job_retries: 1
      # Path to the service-account JSON key, taken from an environment
      # variable so no credentials are hardcoded in this file.
      keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
      # Replace with your dataset location (e.g. US, EU).
      location: my_dataset_location
      method: service-account
      priority: interactive
      # Replace with your GCP project ID.
      project: my_project_id
      threads: 1
      type: bigquery
  target: dev
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_bigquery/dbt_project/tests/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_bigquery/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: airbyte_dbt_prefect_bigquery/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Sample Data (Faker) source: generates fake users/products/purchases records.
resource "airbyte_source_faker" "faker" {
  configuration = {
    always_updated    = false
    count             = 1000 // number of fake records to generate
    parallelism       = 9
    records_per_slice = 10
    seed              = 6 // fixed seed keeps the generated data reproducible
    source_type       = "faker"
  }
  name         = "Faker"
  workspace_id = var.workspace_id
}
// Destinations
// BigQuery destination: Airbyte loads raw records into the `raw_data` dataset.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = "raw_data"
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    // Reads the service-account key JSON from the file path given in variables.
    credentials_json = file(var.credentials_json_path)
    loading_method = {
      // Standard inserts: simplest loading method, suitable for this quickstart.
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}
// Connections
// Connection wiring the Faker source to the BigQuery destination,
// syncing the three streams the Faker source produces.
resource "airbyte_connection" "faker_to_bigquery" {
  name           = "Faker to BigQuery"
  source_id      = airbyte_source_faker.faker.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [
      {
        name = "users"
      },
      {
        name = "products"
      },
      {
        name = "purchases"
      },
    ]
  }
}
================================================
FILE: airbyte_dbt_prefect_bigquery/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte Terraform provider to the version this quickstart was tested with.
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below).
  // NOTE(review): these are Airbyte OSS defaults — change them for any non-local deployment.
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: airbyte_dbt_prefect_bigquery/infra/airbyte/variables.tf
================================================
// Airbyte workspace ID; copy it from the workspace URL in the Airbyte UI.
variable "workspace_id" {
  type = string
}

// GCP project that hosts the BigQuery datasets.
variable "project_id" {
  type = string
}

// Local filesystem path to the service-account JSON key file.
variable "credentials_json_path" {
  type = string
}
================================================
FILE: airbyte_dbt_prefect_bigquery/orchestration/my_elt_flow.py
================================================
import os
from prefect import flow, task
from prefect_airbyte.server import AirbyteServer
from prefect_airbyte.connections import AirbyteConnection, AirbyteSyncResult
from prefect_airbyte.flows import run_connection_sync
from prefect_dbt.cli.commands import DbtCoreOperation
# Block describing how to reach the locally running Airbyte server (OSS defaults).
remote_airbyte_server = AirbyteServer(
    username="airbyte",
    # Export AIRBYTE_PASSWORD in the shell before running this script.
    password=os.getenv("AIRBYTE_PASSWORD"),
    server_host="localhost",
    server_port="8000"
)
# Persist the block so it can be reused and inspected from the Prefect UI.
remote_airbyte_server.save("my-remote-airbyte-server", overwrite=True)

# Handle to one specific Airbyte connection (source -> destination pair).
airbyte_connection = AirbyteConnection(
    airbyte_server=remote_airbyte_server,
    connection_id="...my_airbyte_connection_id...",  # Replace with your Airbyte connection ID (see README step 2)
    status_updates=True,
)
@task(name="Extract, Load with Airbyte")
def run_airbyte_sync(connection: AirbyteConnection) -> AirbyteSyncResult:
    """Trigger an Airbyte sync for *connection* and block until it finishes."""
    sync_job = connection.trigger()
    sync_job.wait_for_completion()
    return sync_job.fetch_result()
def run_dbt_commands(commands, prev_task_result):
    """Build a DbtCoreOperation that runs *commands* against the dbt project.

    ``prev_task_result`` is passed through ``wait_for`` so the dbt step is
    ordered after the upstream Airbyte sync in the flow.
    """
    return DbtCoreOperation(
        commands=commands,
        project_dir="../dbt_project",
        profiles_dir="../dbt_project",
        wait_for=prev_task_result,
    )
@flow(log_prints=True)
def my_elt_flow():
    """ELT flow: Airbyte extract/load followed by dbt transformations."""
    # run Airbyte sync
    airbyte_sync_result = run_airbyte_sync(airbyte_connection)
    # run dbt precheck
    # NOTE: run_dbt_commands is wrapped into a task on the fly; calling the
    # wrapped task returns the DbtCoreOperation, which is then executed
    # explicitly via .run().
    dbt_init_task = task(name="dbt Precheck")(run_dbt_commands)(
        commands=["pwd", "dbt debug", "dbt list"],
        prev_task_result=airbyte_sync_result
    )
    dbt_init_task.run()
    # run dbt models
    dbt_run_task = task(name="Transform with dbt")(run_dbt_commands)(
        commands=["dbt run"],
        prev_task_result=dbt_init_task
    )
    dbt_run_task.run()
# Execute the flow when the script is run directly.
if __name__ == "__main__":
    my_elt_flow()
================================================
FILE: airbyte_dbt_prefect_bigquery/setup.py
================================================
from setuptools import find_packages, setup

# Runtime dependencies for the Airbyte + dbt + Prefect + BigQuery quickstart.
INSTALL_REQUIRES = [
    "prefect",
    "prefect-airbyte",
    "prefect-dbt",
    "dbt-bigquery",
]

setup(
    name="airbyte-dbt-prefect-bigquery",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: airbyte_dbt_prefect_snowflake/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: airbyte_dbt_prefect_snowflake/README.md
================================================
# Airbyte-dbt-Prefect-Snowflake Integration
Welcome to the "Airbyte-dbt-Prefect-Snowflake Integration" repository! This repo provides a quickstart template for building a full data stack using Airbyte, Prefect, dbt, and Snowflake. Easily extract data from Postgres and load it into Snowflake using Airbyte, and apply necessary transformations using dbt, all orchestrated seamlessly with Prefect. While this template doesn't delve into specific data or transformations, its goal is to showcase the synergy of these tools.
This quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Airbyte-dbt-Prefect-Snowflake Integration](#airbyte-dbt-prefect-snowflake-integration)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [3. Setting Up the dbt Project](#3-setting-up-the-dbt-project)
- [4. Orchestrating with Prefect](#4-orchestrating-with-prefect)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add airbyte_dbt_prefect_snowflake
```
2. **Navigate to the directory**:
```bash
cd airbyte_dbt_prefect_snowflake
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres and Snowflake connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 3. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, Snowflake. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd ../../dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your Snowflake connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your Snowflake instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to Snowflake.
## 4. Orchestrating with Prefect
[Prefect](https://prefect.io/) is an orchestration workflow tool that makes it easy to build, run, and monitor data workflows by writing Python code. In this section, we'll walk you through creating a Prefect flow to orchestrate both Airbyte extract and load operations, and dbt transformations with Python:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Prefect orchestration configurations:
```bash
cd ../orchestration
```
2. **Set Environment Variables**:
Prefect requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export AIRBYTE_PASSWORD=password
```
3. **Connect to Prefect's API**:
Open a new terminal window. Start a local Prefect server instance in your virtual environment:
```bash
prefect server start
```
4. **Deploy the Flow**:
When we run the flow script, Prefect will automatically create a flow deployment that you can interact with via the UI and API. The script will stay running so that it can listen for scheduled or triggered runs of this flow; once a run is found, it will be executed within a subprocess.
```bash
python my_elt_flow.py
```
5. **Access Prefect UI in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:4200
```
You can now begin interacting with your newly created deployment!
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Create dbt Sources for Airbyte Data**:
- Your raw data extracted via Airbyte can be represented as sources in dbt. Start by [creating new dbt sources](https://docs.getdbt.com/docs/build/sources) to represent this data, allowing for structured transformations down the line.
2. **Add Your dbt Transformations**:
- With your dbt sources in place, you can now build upon them. Add your custom SQL transformations in dbt, ensuring that you treat the sources as an upstream dependency. This ensures that your transformations work on the most up-to-date raw data.
3. **Extend the Prefect Pipeline**:
- You can create flow runs from this deployment via API calls triggered by new data syncs in Airbyte, rather than on a schedule. You can customize your dbt runs based on the results obtained from the AirbyteSyncResult, and you can also migrate the deployment to Prefect Cloud.
4. **Extend the Project**:
- The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or enhance your transformation logic – the floor is yours. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
example:
+materialized: view
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/macros/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/models/example/my_first_dbt_model.sql
================================================
/*
    Starter dbt model.

    Models can also be configured directly inside the SQL file; the config()
    call below overrides dbt_project.yml (swap 'table' for 'view' to try it).
*/

{{ config(materialized='table') }}

with raw_source as (
    select *
    from {{ source('snowflake', 'sample_table') }}
)

select *
from raw_source

-- Uncomment the line below to remove records with null `id` values:
-- where id is not null
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/models/example/my_second_dbt_model.sql
================================================
-- Builds on my_first_dbt_model (via dbt's `ref`), keeping only the row
-- with id = 1.
select
    *
from {{ ref('my_first_dbt_model') }}
where id = 1
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/models/example/schema.yml
================================================
version: 2
models:
- name: my_first_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
- name: my_second_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/models/sources.yml
================================================
version: 2

# dbt source definitions for the raw data loaded by Airbyte.
# (The Dagster `meta.asset_key` block present previously was copy-paste
# residue from the Dagster quickstart; this project orchestrates with
# Prefect, so it has been removed.)
sources:
  - name: snowflake
    tables:
      # Example source table; replace with the table(s) your Airbyte
      # connection actually syncs.
      - name: sample_table
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/profiles.yml
================================================
# dbt connection profile for the `dbt_project` project (see dbt_project.yml).
# Credentials are read from environment variables where shown; replace the
# remaining placeholder values with your Snowflake details.
dbt_project:
  outputs:
    dev:
      type: snowflake
      account: "{{ env_var('DBT_SNOWFLAKE_ACCOUNT_ID', '') }}"

      # User/password auth
      user: username
      password: "{{ env_var('DBT_SNOWFLAKE_PASSWORD', '') }}"

      role: user_role
      database: database_name
      warehouse: warehouse_name
      schema: dbt_schema
      threads: 1
      client_session_keep_alive: False
      query_tag: anything

      # optional
      connect_retries: 0 # default 0
      connect_timeout: 10 # default: 10
      retry_on_database_errors: False # default: false
      retry_all: False # default: false
      reuse_connections: False # default: false (available v1.4+)
  target: dev
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_snowflake/dbt_project/tests/.gitkeep
================================================
================================================
FILE: airbyte_dbt_prefect_snowflake/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: airbyte_dbt_prefect_snowflake/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Source: the Postgres database Airbyte extracts from. Replace the
// "...my_..." placeholders with real connection details (or wire them to
// variables, as suggested in the README).
resource "airbyte_source_postgres" "postgres" {
  configuration = {
    database    = "...my_database..."
    host        = "...my_host..."
    username    = "...my_username..."
    password    = "...my_password..."
    port        = 5432
    source_type = "postgres"
    schemas = [
      "...my_schema..."
    ]
    // ssl_mode "allow" — confirm this matches your security requirements
    // before pointing at a non-local database.
    ssl_mode = {
      allow = {}
    }
    tunnel_method = {
      no_tunnel = {}
    }
    // Incremental replication driven by a user-defined cursor column.
    replication_method = {
      scan_changes_with_user_defined_cursor = {}
    }
  }
  name         = "Postgres"
  workspace_id = var.workspace_id
}
// Destinations
// Destination: Snowflake, authenticated with a key pair. Replace the
// placeholder values with your account's details.
resource "airbyte_destination_snowflake" "snowflake" {
  configuration = {
    credentials = {
      destination_snowflake_authorization_method_key_pair_authentication = {
        auth_type            = "Key Pair Authentication"
        private_key          = "...my_private_key..."
        private_key_password = "...my_private_key_password..."
      }
    }
    database         = "AIRBYTE_DATABASE"
    destination_type = "snowflake"
    // Your Snowflake account host, e.g. <account>.<region>.<cloud>.snowflakecomputing.com
    host             = "accountname.us-east-2.aws.snowflakecomputing.com"
    jdbc_url_params  = "...my_jdbc_url_params..."
    raw_data_schema  = "...my_raw_data_schema..."
    role             = "AIRBYTE_ROLE"
    schema           = "AIRBYTE_SCHEMA"
    username         = "AIRBYTE_USER"
    warehouse        = "AIRBYTE_WAREHOUSE"
  }
  name         = "Snowflake"
  workspace_id = var.workspace_id
}
// Connections
// Connection wiring the Postgres source to the Snowflake destination.
resource "airbyte_connection" "postgres_to_snowflake" {
  name           = "Postgres to Snowflake"
  source_id      = airbyte_source_postgres.postgres.source_id
  destination_id = airbyte_destination_snowflake.snowflake.destination_id
  configurations = {
    // List each source table (stream) that should be synced.
    streams = [
      {
        name = "...my_table_name_1..."
      },
      {
        name = "...my_table_name_2..."
      },
    ]
  }
}
================================================
FILE: airbyte_dbt_prefect_snowflake/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

// Provider credentials. The defaults below match a local Airbyte OSS
// deployment fronted by the airbyte-proxy; replace them with the actual
// username/password you configured.
provider "airbyte" {
  username = "airbyte"
  password = "password"
  // If running locally (Airbyte OSS), this is the URL of the airbyte-api-server.
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: airbyte_dbt_prefect_snowflake/infra/airbyte/variables.tf
================================================
// Airbyte workspace that will own the created sources/destinations/connections.
variable "workspace_id" {
  type = string
}
================================================
FILE: airbyte_dbt_prefect_snowflake/orchestration/my_elt_flow.py
================================================
import os
from prefect import flow, task
from prefect_airbyte.server import AirbyteServer
from prefect_airbyte.connections import AirbyteConnection, AirbyteSyncResult
from prefect_airbyte.flows import run_connection_sync
from prefect_dbt.cli.commands import DbtCoreOperation
# Block describing the Airbyte server this flow talks to. Targets a local
# deployment; the password is read from the AIRBYTE_PASSWORD environment
# variable (None if unset).
remote_airbyte_server = AirbyteServer(
    username="airbyte",
    password=os.getenv("AIRBYTE_PASSWORD"),
    server_host="localhost",
    server_port="8000"
)
# Persist the server block so it can be referenced by name later.
remote_airbyte_server.save("my-remote-airbyte-server", overwrite=True)

# The Airbyte connection (source -> destination sync) this flow triggers.
airbyte_connection = AirbyteConnection(
    airbyte_server=remote_airbyte_server,
    connection_id="...my_airbyte_connection_id...",  # Replace with your Airbyte connection ID
    status_updates=True,  # stream sync status updates into the logs
)
@task(name="Extract, Load with Airbyte")
def run_airbyte_sync(connection: AirbyteConnection) -> AirbyteSyncResult:
    """Kick off a sync of the given Airbyte connection and block until done.

    Returns the completed job's AirbyteSyncResult.
    """
    job = connection.trigger()
    job.wait_for_completion()
    return job.fetch_result()
def run_dbt_commands(commands, prev_task_result):
    """Return a DbtCoreOperation configured to run *commands*.

    The operation targets the sibling dbt project and waits on
    *prev_task_result* so dbt executes only after upstream work finishes.
    """
    return DbtCoreOperation(
        commands=commands,
        project_dir="../dbt_project",
        profiles_dir="../dbt_project",
        wait_for=prev_task_result,
    )
@flow(log_prints=True)
def my_elt_flow():
    """ELT flow: Airbyte extract/load followed by dbt transformations."""
    # run Airbyte sync
    # (alternative: prefect_airbyte.flows.run_connection_sync(airbyte_connection)
    # would trigger the same sync via the prebuilt subflow)
    airbyte_sync_result = run_airbyte_sync(airbyte_connection)
    # run dbt precheck
    # NOTE: run_dbt_commands is wrapped into a task on the fly; calling the
    # wrapped task returns the DbtCoreOperation, which is then executed
    # explicitly via .run().
    dbt_init_task = task(name="dbt Precheck")(run_dbt_commands)(
        commands=["pwd", "dbt debug", "dbt list"],
        prev_task_result=airbyte_sync_result
    )
    dbt_init_task.run()
    # run dbt models
    dbt_run_task = task(name="Transform with dbt")(run_dbt_commands)(
        commands=["dbt run"],
        prev_task_result=dbt_init_task
    )
    dbt_run_task.run()
# Execute the flow when the script is run directly.
if __name__ == "__main__":
    # my_elt_flow.visualize()  # optional: render the flow graph instead of running it
    my_elt_flow()
================================================
FILE: airbyte_dbt_prefect_snowflake/setup.py
================================================
from setuptools import find_packages, setup

# Runtime dependencies for the Airbyte + dbt + Prefect + Snowflake quickstart.
INSTALL_REQUIRES = [
    "dbt-snowflake",
    "prefect",
    "prefect-airbyte",
    "prefect-dbt",
    "dbt-core>=1.4.0",
]

setup(
    name="airbyte-dbt-prefect-snowflake",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: airbyte_dbt_snowflake_looker/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: airbyte_dbt_snowflake_looker/README.md
================================================
# Airbyte-dbt-Snowflake-Looker Integration
Welcome to the "Airbyte-dbt-Snowflake-Looker Integration" repository! This repo provides a quickstart template for building a full data stack using Airbyte, Dagster, dbt, Snowflake and Looker. Easily extract data from Postgres and load it into Snowflake using Airbyte, and apply necessary transformations using dbt, all orchestrated seamlessly with Dagster. Then connect your Snowflake instance to Looker for business intelligence, analytics, data modeling, etc. While this template doesn't delve into specific data or transformations, its goal is to showcase the synergy of these tools.
This quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Airbyte-dbt-Snowflake-Looker Integration](#airbyte-dbt-snowflake-looker-integration)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Pipeline DAG](#pipeline-dag)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [3. Setting Up the dbt Project](#3-setting-up-the-dbt-project)
- [4. Orchestrating with Dagster](#4-orchestrating-with-dagster)
- [5. Integrating with Looker](#5-integrating-with-looker)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Pipeline DAG

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add airbyte_dbt_snowflake_looker
```
2. **Navigate to the directory**:
```bash
cd airbyte_dbt_snowflake_looker
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres and Snowflake connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 3. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, Snowflake. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd ../../dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your Snowflake connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your Snowflake instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to Snowflake.
## 4. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd ../../orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
## 5. Integrating with Looker
Looker is a product that helps you explore, share, and visualize your company's data so that you can make better business decisions. It is an enterprise platform for BI, data applications, and embedded analytics that helps you explore and share insights in real time. It also has the ability to convert user input via a Graphical User Interface (GUI) into SQL queries and subsequently transmit them directly to the database in real-time. To get started with Looker and learn more about it, check [here](https://cloud.google.com/looker/docs).
Follow the steps below to integrate your Snowflake instance with your Looker studio.
1. Create a Looker User in Snowflake
To allow Looker to run queries in Snowflake, you need to create a dedicated user for the Looker instance in Snowflake and provision access for the user. Run the queries below in Snowflake to do this.
You can add in the `ON FUTURE` keyword to persist `GRANT` statements for future tables and objects in the database to prevent re-running the GRANT statements as new tables are created.
```sql
-- change role to ACCOUNTADMIN
use role ACCOUNTADMIN;
-- create role for looker
create role if not exists looker_role;
grant role looker_role to role SYSADMIN;
-- Note that we are not making the looker_role a SYSADMIN, but rather granting users with the SYSADMIN role to modify the looker_role
-- create a user for looker
create user if not exists looker_user
password = '<password>';
grant role looker_role to user looker_user;
alter user looker_user
set default_role = looker_role
default_warehouse = 'looker_wh';
-- this part is to be executed only if the user roles are to be changed
-- change role
use role SYSADMIN;
-- create a warehouse for looker (optional)
create warehouse if not exists looker_wh
-- set the size based on your dataset
warehouse_size = medium
warehouse_type = standard
auto_suspend = 1800
auto_resume = true
initially_suspended = true;
grant all privileges
on warehouse looker_wh
to role looker_role;
-- grant read only database access (repeat for all database/schemas)
grant usage on database <database_name> to role looker_role;
grant usage on schema <database_name>.<schema_name> to role looker_role;
-- rerun the following any time a table is added to the schema
grant select on all tables in schema <database_name>.<schema_name> to role looker_role;
-- or
grant select on future tables in schema <database_name>.<schema_name> to role looker_role;
-- create schema for looker to write back to
use database <database_name>;
create schema if not exists looker_scratch;
use role ACCOUNTADMIN;
grant ownership on schema looker_scratch to role SYSADMIN revoke current grants;
grant all on schema looker_scratch to role looker_role;
```
2. Adding the Database Connection in Looker
After creating the looker user in snowflake, we need to create a connection from Looker to Snowflake using our Snowflake credentials and the looker user we just created in Snowflake. To do this, follow the steps below.
- Navigate to the **Admin** panel of the Looker interface.
- Select **Connections**.
- Click **Add Connection**. A Configuration Section will open up where you will be required to fill the connection details below. For more details see [here](https://cloud.google.com/looker/docs/connecting-to-your-db).
- **Name**: Give the connection an Arbitrary name.
- **Dialect**: Select **Snowflake**.
- **Host**: It is of the format `<account_name>.snowflakecomputing.com`. See [here](https://docs.snowflake.com/developer-guide/jdbc/jdbc-configure#connection-parameters) to validate your host value.
- **Port**: The default is 443.
- **Database**: Provide the name of the default database that is required for use. Note that this field is case-sensitive.
- **Schema**: This is the default Database Schema that is used in your Snowflake Deployment.
- **Authentication**: Select **Database Account** or **OAuth**:
- Use **Database Account** to specify the Username and Password of the Snowflake user account that will be used to connect to Looker.
- Follow the steps below to get credentials to use **OAuth** for the connection.
To set up an OAuth based connection, you will require a user account with `ACCOUNTADMIN` permission on Snowflake. Firstly you are required to run the following command in Snowflake, where `<looker_hostname>` is the hostname of the Looker Instance:
```sql
CREATE SECURITY INTEGRATION LOOKER
TYPE = OAUTH
ENABLED = TRUE
OAUTH_CLIENT = LOOKER
OAUTH_REDIRECT_URI = 'https://<looker_hostname>/external_oauth/redirect';
```
To obtain the OAuth Client ID and Client Secret, you need to run the following command:
```sql
SELECT SYSTEM$SHOW_OAUTH_CLIENT_SECRETS('LOOKER');
```
Paste in the OAUTH_CLIENT_ID and OAUTH_CLIENT_SECRET values in the Looker OAuth fields.
- **Enable PDTs**: Use this to enable persistent derived tables (PDTs). See [here](https://cloud.google.com/looker/docs/derived-tables#persistent-derived-tables) for more information.
- **Temp Database**: If PDTs [Persistent Derived Tables] are enabled, this section needs to be set to a Database Schema where the user has full privileges to create, drop, rename, and alter tables.
- **(Optional) Max Connections per node**: See more about this [here](https://cloud.google.com/looker/docs/connecting-to-your-db#max_connections).
- **Database Time Zone**: Default is UTC.
- **Query Time Zone**: Default is UTC.
- Additional JDBC parameters: Add additional JDBC parameters from the Snowflake JDBC driver.
- Add `warehouse=<warehouse_name>`.
- Additionally, by default, Looker will set the following Snowflake parameters on each session:
- `TIMESTAMP_TYPE_MAPPING=TIMESTAMP_LTZ`
- `JDBC_TREAT_DECIMAL_AS_INT=FALSE`
- `TIMESTAMP_INPUT_FORMAT=AUTO`
- `AUTOCOMMIT=TRUE`
You can override each of these parameters by setting an alternative value in the Additional JDBC parameters field, for example: `&AUTOCOMMIT=FALSE`
- Click on **Test** to check if the connection is Successful. For troubleshooting, see [here](https://cloud.google.com/looker/docs/testing-db-connectivity).
- Click **Connect** to save these settings.
Now that the connection to Snowflake has been created, you are good to go to explore your Snowflake data in Looker.
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Create dbt Sources for Airbyte Data**:
Your raw data extracted via Airbyte can be represented as sources in dbt. Start by [creating new dbt sources](https://docs.getdbt.com/docs/build/sources) to represent this data, allowing for structured transformations down the line.
2. **Add Your dbt Transformations**:
With your dbt sources in place, you can now build upon them. Add your custom SQL transformations in dbt, ensuring that you treat the sources as an upstream dependency. This ensures that your transformations work on the most up-to-date raw data.
3. **Execute the Pipeline in Dagster**:
Navigate to the Dagster UI and click on "Materialize all". This triggers the entire pipeline, encompassing the extraction via Airbyte, transformations via dbt, and any other subsequent steps.
4. **Explore in Looker**:
You can use the SQL Runner to create queries and Explores, create and share Looks (reports and dashboards), or use LookML to create a data model that Looker will use to query your data. Whichever way you wish to go with your Snowflake data, the choice is yours.
5. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or enhance your transformation logic – the floor is yours. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
example:
+materialized: view
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/macros/.gitkeep
================================================
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/models/example/my_first_dbt_model.sql
================================================
/*
    Welcome to your first dbt model!
    Did you know that you can also configure models directly within SQL files?
    This will override configurations stated in dbt_project.yml

    Try changing "table" to "view" below
*/

{{ config(materialized='table') }}

-- Read from the Airbyte-loaded table declared in models/sources.yml.
with source_data as (
    select * from {{ source('snowflake', 'sample_table') }}
)

select *
from source_data

/*
    Uncomment the line below to remove records with null `id` values
*/

-- where id is not null
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/models/example/my_second_dbt_model.sql
================================================
-- Use the `ref` function to select from other models
-- Filters the upstream starter model down to the row with id = 1.
select *
from {{ ref('my_first_dbt_model') }}
where id = 1
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/models/example/schema.yml
================================================
# Column-level documentation and data tests for the starter models.
# `unique` + `not_null` together enforce that `id` behaves as a primary key.
version: 2

models:
  - name: my_first_dbt_model
    description: "A starter dbt model"
    columns:
      - name: id
        description: "The primary key for this table"
        tests:
          - unique
          - not_null

  - name: my_second_dbt_model
    description: "A starter dbt model"
    columns:
      - name: id
        description: "The primary key for this table"
        tests:
          - unique
          - not_null
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/models/sources.yml
================================================
# Declares the raw table loaded by Airbyte so dbt models can reference it
# with {{ source('snowflake', 'sample_table') }}.
version: 2

sources:
  - name: snowflake
    tables:
      - name: sample_table
        meta:
          dagster:
            asset_key: ["sample_table"] # This metadata specifies the corresponding Dagster asset for this dbt source.
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/profiles.yml
================================================
# dbt connection profile for the Snowflake target. Secrets are pulled from
# environment variables so credentials stay out of version control; replace
# the remaining placeholder values (user, role, database, warehouse, schema)
# with your own before running `dbt debug`.
dbt_project:
  outputs:
    dev:
      type: snowflake
      # Set DBT_SNOWFLAKE_ACCOUNT_ID to your Snowflake account identifier.
      account: "{{ env_var('DBT_SNOWFLAKE_ACCOUNT_ID', '') }}"
      # User/password auth
      user: username
      # Set DBT_SNOWFLAKE_PASSWORD in the environment.
      password: "{{ env_var('DBT_SNOWFLAKE_PASSWORD', '') }}"
      role: user_role
      database: database_name
      warehouse: warehouse_name
      schema: dbt_schema
      threads: 1
      client_session_keep_alive: False
      query_tag: anything
      # optional
      connect_retries: 0 # default 0
      connect_timeout: 10 # default: 10
      retry_on_database_errors: False # default: false
      retry_all: False # default: false
      reuse_connections: False # default: false (available v1.4+)
  target: dev
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: airbyte_dbt_snowflake_looker/dbt_project/tests/.gitkeep
================================================
================================================
FILE: airbyte_dbt_snowflake_looker/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: airbyte_dbt_snowflake_looker/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
resource "airbyte_source_postgres" "postgres" {
configuration = {
database = "...my_database..."
host = "...my_host..."
username = "...my_username..."
password = "...my_password..."
port = 5432
source_type = "postgres"
schemas = [
"...my_schema..."
]
ssl_mode = {
allow = {}
}
tunnel_method = {
no_tunnel = {}
}
replication_method = {
scan_changes_with_user_defined_cursor = {}
}
}
name = "Postgres"
workspace_id = var.workspace_id
}
// Destinations
resource "airbyte_destination_snowflake" "snowflake" {
configuration = {
credentials = {
destination_snowflake_authorization_method_key_pair_authentication = {
auth_type = "Key Pair Authentication"
private_key = "...my_private_key..."
private_key_password = "...my_private_key_password..."
}
}
database = "AIRBYTE_DATABASE"
destination_type = "snowflake"
host = "accountname.us-east-2.aws.snowflakecomputing.com"
jdbc_url_params = "...my_jdbc_url_params..."
raw_data_schema = "...my_raw_data_schema..."
role = "AIRBYTE_ROLE"
schema = "AIRBYTE_SCHEMA"
username = "AIRBYTE_USER"
warehouse = "AIRBYTE_WAREHOUSE"
}
name = "Snowflake"
workspace_id = var.workspace_id
}
// Connections
resource "airbyte_connection" "postgres_to_snowflake" {
name = "Postgres to Snowflake"
source_id = airbyte_source_postgres.postgres.source_id
destination_id = airbyte_destination_snowflake.snowflake.destination_id
configurations = {
streams = [
{
name = "...my_table_name_1..."
},
{
name = "...my_table_name_2..."
},
]
}
}
================================================
FILE: airbyte_dbt_snowflake_looker/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source = "airbytehq/airbyte"
      // Pinned provider version; bump deliberately, since resource schemas
      // can change between releases.
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: airbyte_dbt_snowflake_looker/infra/airbyte/variables.tf
================================================
variable "workspace_id" {
type = string
}
================================================
FILE: airbyte_dbt_snowflake_looker/orchestration/orchestration/__init__.py
================================================
================================================
FILE: airbyte_dbt_snowflake_looker/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path


@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize the dbt models by streaming the events of `dbt build`."""
    yield from dbt.cli(["build"], context=context).stream()


# Connection details for the local Airbyte instance; its configured syncs are
# loaded below as Dagster assets.
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password:
    username="airbyte",
    # Read from the AIRBYTE_PASSWORD env var (see the quickstart README);
    # None if the variable is unset.
    password=os.getenv("AIRBYTE_PASSWORD")
)
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance)
================================================
FILE: airbyte_dbt_snowflake_looker/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource

# Absolute path to the dbt project that lives next to this orchestration
# package (three levels up from this module).
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
# Shared dbt CLI resource used both here (to parse) and by the dbt assets.
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # Run `dbt parse` now and point at the freshly generated manifest.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    # Reuse the manifest from a previous `dbt build`/`dbt parse` run.
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: airbyte_dbt_snowflake_looker/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource
from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# Top-level Dagster code location: registers the dbt and Airbyte assets, any
# schedules, and the dbt CLI resource that the dbt assets execute with.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: airbyte_dbt_snowflake_looker/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
schedules = [
# build_schedule_from_dbt_selection(
# [dbt_project_dbt_assets],
# job_name="materialize_dbt_models",
# cron_schedule="0 0 * * *",
# dbt_select="fqn:*",
# ),
]
================================================
FILE: airbyte_dbt_snowflake_looker/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: airbyte_dbt_snowflake_looker/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Runtime dependencies for the Dagster + dbt orchestration package.
INSTALL_REQUIRES = [
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dbt-core>=1.4.0",
    "dbt-snowflake",
]

# Extras installed via `pip install .[dev]` for local development
# (the Dagster web UI).
EXTRAS_REQUIRE = {
    "dev": [
        "dagster-webserver",
    ],
}

setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE,
)
================================================
FILE: airbyte_dbt_snowflake_looker/setup.py
================================================
from setuptools import find_packages, setup

# Runtime dependencies for the Airbyte + dbt + Snowflake + Looker quickstart.
INSTALL_REQUIRES = [
    "dbt-snowflake",
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dagster-airbyte",
]

# Extras installed via `pip install .[dev]` for local development.
EXTRAS_REQUIRE = {"dev": ["dagit", "pytest"]}

setup(
    name="airbyte-dbt-snowflake-looker",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE,
)
================================================
FILE: airbyte_lib_notebooks/AirbyteLib_Basic_Features_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This demo has been be moved. Click [here](https://github.com/airbytehq/quickstarts/blob/main/pyairbyte_notebooks/PyAirbyte_Basic_Features_Demo.ipynb) to go to the demo. "
]
}
],
"metadata": {
"colab": {
"include_colab_link": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: airbyte_lib_notebooks/AirbyteLib_CoinAPI_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This demo has been be moved. Click [here](https://github.com/airbytehq/quickstarts/blob/main/pyairbyte_notebooks/PyAirbyte_CoinAPI_Demo.ipynb) to go to the demo. "
]
}
],
"metadata": {
"colab": {
"include_colab_link": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: airbyte_lib_notebooks/AirbyteLib_GA4_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This demo has been be moved. Click [here](https://github.com/airbytehq/quickstarts/blob/main/pyairbyte_notebooks/PyAirbyte_GA4_Demo.ipynb) to go to the demo. "
]
}
],
"metadata": {
"colab": {
"include_colab_link": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"24a7bb9210cc4c00b19f12715f36ef8a": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": "auto"
}
},
"3031c476bc464ed08e90b5e91ee6393b": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": "black",
"description_width": ""
}
},
"3330876bb7d547b28191270cb53c3e57": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": "auto"
}
},
"42b10528846241e0aac16458a1b8655f": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": "black",
"description_width": ""
}
},
"733f09d5b4584b0382560faab0629b0c": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_3330876bb7d547b28191270cb53c3e57",
"max": 100,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_42b10528846241e0aac16458a1b8655f",
"value": 100
}
},
"d7c54b0aaa4442a19254501027caf793": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_24a7bb9210cc4c00b19f12715f36ef8a",
"max": 100,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_3031c476bc464ed08e90b5e91ee6393b",
"value": 100
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: airbyte_lib_notebooks/AirbyteLib_Github_Incremental_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This demo has been be moved. Click [here](https://github.com/airbytehq/quickstarts/blob/main/pyairbyte_notebooks/PyAirbyte_Github_Incremental_Demo.ipynb) to go to the demo. "
]
}
],
"metadata": {
"colab": {
"include_colab_link": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: airbyte_lib_notebooks/PyAirbyte_Postgres_Custom_Cache_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This demo has been be moved. Click [here](https://github.com/airbytehq/quickstarts/blob/main/pyairbyte_notebooks/PyAirbyte_Postgres_Custom_Cache_Demo.ipynb) to go to the demo. "
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: airbyte_lib_notebooks/PyAirbyte_Shopify_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This demo has been be moved. Click [here](https://github.com/airbytehq/quickstarts/blob/main/pyairbyte_notebooks/PyAirbyte_Shopify_Demo.ipynb) to go to the demo. "
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: airbyte_lib_notebooks/README.md
================================================
# PyAirbyte Notebooks Quickstart
This quickstart will help you get started quickly with PyAirbyte.
> **Note:** **PyAirbyte is currently in private beta and is not intended for production use.**
## Quickstart Quicklinks
To jump right in, click on any of the below links to open a new Colab notebook from the provided quickstart template.
1. [Basic Features Demo](https://colab.research.google.com/github/airbytehq/quickstarts/blob/master/airbyte_lib_notebooks/AirbyteLib_Basic_Features_Demo.ipynb) - Walks through the basic functionality of PyAirbyte and how to use it in a Notebook environment.
2. [CoinAPI Demo](https://colab.research.google.com/github/airbytehq/quickstarts/blob/master/airbyte_lib_notebooks/AirbyteLib_CoinAPI_Demo.ipynb) - Shows how to provide credentials securely and perform basic graphing.
3. [GitHub Demo](https://colab.research.google.com/github/airbytehq/quickstarts/blob/master/airbyte_lib_notebooks/AirbyteLib_Github_Incremental_Demo.ipynb) - Demonstrates how to get data from GitHub, how to analyze GitHub metrics and how to refresh your cache data incrementally.
4. [GA4 Demo](https://colab.research.google.com/github/airbytehq/quickstarts/blob/master/airbyte_lib_notebooks/AirbyteLib_GA4_Demo.ipynb) - A Google Analytics demo showing how to analyze page views and other GA metrics.
## How to use these Quickstarts
There are three ways to use the quickstart resources here.
### Google Colab
Google Colab ("Colab" for short) is a hosted version of Jupyter. Because it is hosted by google, most people can access Colab using their existing Google account. To use these notebooks in Colab, click on the "Open in Colab" badge at the top of the file.
Note:
- Colab doesn't come with virtual environment ("venv") support by default. For this reason, our demo workbooks start by installing venv support as a prerequisite, using `apt-get`.
### Self-Hosted Jupyter
If you have a self-hosted Jupyter instance, you can load any of the notebooks in this directory.
### VS Code Notebooks
You can run these notebooks natively in VS Code if you have the Python extension installed. You can also use GitHub Codespaces to open a new VS Code devcontainer in your web browser.
## Securely Managed Secrets
You can pass secrets to PyAirbyte by using the `get_secret()` function. This call will retrieve a named secret from any of the following locations:
1. Google Colab Secrets
2. Environment Variables
3. Masked User Input (via [getpass](https://docs.python.org/3/library/getpass.html))
If you are using Google Colab, we suggest using the Colab secrets feature. For other environments, you can set your secret values in environment variables.
Note: The `get_secret()` implementation in PyAirbyte is provided for your convenience as a secure runtime-agnostic default secrets interface. You are always free to use any secrets management platform you are most familiar with.
**Warning:** Please do not enter your secrets directly into notebook cells. Doing so can cause the secret to be leaked into logs and/or in the "previous versions" look-back of the notebook. Instead, simply call `get_secret()` without pre-initializing the value. If the value is not already initialized, you will be prompted for secret values interactively and all values will be masked during input to avoid accidental leakage. This is performed using the Python standard library [`getpass`](https://docs.python.org/3/library/getpass.html).
================================================
FILE: airbyte_s3_pinecone_rag/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
secrets/
================================================
FILE: airbyte_s3_pinecone_rag/README.md
================================================
# Airbyte S3 Pinecone RAG
The Airbyte S3 Pinecone RAG repo provides a quickstart template for building a full data stack using Airbyte Cloud, Terraform, and dbt to move data from S3 -> BigQuery -> Pinecone, so you can interact with the fetched data through an LLM and form a full-fledged Retrieval-Augmented Generation (RAG) pipeline.
This quickstart is designed to minimize setup hassles and propel you forward.

## Table of Contents
- [Prerequisites](#prerequisites)
- [Creating an Environment For Your Project](#1-creating-an-environment-for-your-project)
- [Setting Up BigQuery](#2-setting-up-bigquery)
- [Adding Configuration Values](#3-adding-configuration-values)
- [Setting Up Airbyte Connectors](#4-setting-up-airbyte-connectors)
- [Sync S3 Data into BigQuery](#5-sync-s3-data-into-bigquery)
- [Setting Up the dbt Project](#6-setting-up-the-dbt-project)
- [Publishing Into Pinecone](#7-publishing-into-pinecone)
- [Asking Questions About Your Data](#8-asking-questions-about-your-data)
- [Next Steps](#9-next-steps)
## Prerequisites
### Amazon S3
The data source is Amazon S3; its contents are fetched so they can be made searchable in Pinecone. Follow the [S3 source docs](https://docs.airbyte.com/integrations/sources/s3) for information on configuring an S3 source.
### BigQuery
BigQuery will store the raw API data (csv in example) from our sources and also the transformed data from dbt. You'll need a BigQuery project and a dataset with a service account that can control the dataset. Airbyte's [BigQuery destination docs](https://docs.airbyte.com/integrations/destinations/bigquery) lists the requirements and links describing how to configure.
### Pinecone
Pinecone is the vector database we will use to index documents and their metadata, and also for finding documents that provide context for a query. You'll need a Pinecone account, an API key, and an index created with 1536 dimensions, as OpenAI returns vectors of 1536 dimensions. See the [Pinecone docs](https://docs.pinecone.io/docs/quickstart) for more information.
### OpenAI
OpenAI is used both in processing the query and also provides the LLM for generating a response. The query is vectorized so it can be used to identify relevant items in the Pinecone index, and these items are provided to the LLM as context to better respond to the query. You'll need an OpenAI account with credits and an API key. If you already have an account with OpenAI, you can generate a new API key by visiting this link: [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
### Software
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: If you want to deploy the open-source version instead of using Airbyte Cloud, follow the installation instructions from the [Deploy Airbyte documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Creating an Environment For Your Project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add airbyte_s3_pinecone_rag
```
2. **Navigate to the directory**:
```bash
cd airbyte_s3_pinecone_rag
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv .venv
source .venv/bin/activate
```
- For Windows:
```bash
python -m venv .venv
.\.venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Adding Configuration Values
The following steps will execute Terraform and dbt workflows to create the necessary resources for the integration. To do this, you'll need to provide some configuration values. Copy the provided `.env.template` file to `.env` and set its values. Then run the following command to source the environment variables into your shell so they are available when running Terraform and dbt:
```bash
set -o allexport && source .env && set +o allexport
```
Don't forget to re-run the above command after making any changes to the `.env` file.
## 4. Setting Up Airbyte Connectors
### Manually via the Airbyte UI
Create the [sources](https://docs.airbyte.com/quickstart/add-a-source) and [destinations](https://docs.airbyte.com/quickstart/add-a-destination) within your Airbyte environment, you can follow the [create connections](https://docs.airbyte.com/quickstart/create-a-connection) to define connections between them to control how and when the data will sync.
You can find the Airbyte workspace ID from the URL, e.g. `https://cloud.airbyte.com/workspaces/{workspace-id}/connections`.
### With Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set it up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Defines variables whose values are populated from the `.env` file.
If you're using Airbyte Cloud instead of a local deployment you will need to update the Airbyte provider configuration in _infra/airbyte/provider.tf_, setting the `bearer_auth` to an API key generated at https://portal.airbyte.com/.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
If you're using Airbyte Cloud, we configured Terraform to assemble the URL for you:
```bash
terraform output
```
will print something like:
```
airbyte_cloud_url = "https://cloud.airbyte.com/workspaces/{workspace-id}/connections"
```
## 5. Sync S3 Data into BigQuery
Before building the dbt project, transforming the raw s3 data, the source tables must exist in the BigQuery dataset. Open the Airbyte UI and navigate to the Connections page. Click the _Sync Now_ button for `S3 to BigQuery` to start the sync.
Once the sync is complete you can inspect the tables in BigQuery.
## 6. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform, and is preconfigured to pull the connection details from environment variables.
You can use the `dbt_project/example/schema.yml` to make your own dbt_model and create your own sql files corresponding to the dbt_model names
2. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided as the `dev` config in `profiles.yml`.
3. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
4. **Run the Model**
With the connection in place, you can now build the model to create the `S3` view in BigQuery, which the configured Airbyte connection will use to sync S3 data into Pinecone.
You can inspect the provided _dbt_project/models/marts/purchase_data.sql_ to see removing null values after querying for purchase patterns.
```bash
dbt build
```
You should now see the `s3_data` view in BigQuery.
## 7. Publishing Into Pinecone
With the source data transformed it is now ready to publish into the Pinecone index. Head back to the Connections and start a sync for `Publish BigQuery Data to Pinecone`.
## 8. Asking Questions About Your Data
After Airbyte has published the S3 data text into your Pinecone index, it is ready to be interacted with via an LLM model. After providing a couple more environment variables, the provided _query.py_ will provide an interactive session to ask questions about your data.
```bash
export OPENAI_API_KEY=openai_api_key
export PINECONE_API_KEY=pinecone_api_key
export PINECONE_ENVIRONMENT=pinecone_environment
export PINECONE_INDEX=pinecone_index
python query.py
```
## 9. Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Use Additional Data Sources**:
Pinecone allows you to store vectors from multiple sources in a single index. This allows multiple document types to be queried together, and even referenced between themselves by the LLM. Add additional sources to the Terraform configuration or manually through the Airbyte UI.
2. **Try Different LLM Chains**:
The provided _query.py_ uses many default values when creating the LLM chain, which is quick to create but limits flexibility. One constraint is that the `stuff` chain type requires the query and discovered documents all fit within a single tokenized input to the LLM. This is fine for short queries and documents, but if you use longer sources you will need to use another chain type such as `map_reduce`.
3. **Extend Access to the LLM**:
The provided _query.py_ is a simple example of how to interact with the LLM locally. You could build a web-based UI that triggers a query and displays the results, or even integrate the LLM into a Slack bot to provide answers to questions in real-time.
4. **Make Use of Metadata**
When configuring the Pinecone destination you can choose to include metadata fields from the data. This allows you to filter results based on the metadata fields. For example, you could filter the results of a query to only include documents from a specific author, or provide a time range that a matching item must have been created or edited within. Metadata values are also provided back to the query executor so you can use them to provide additional context to the LLM.
See [Filtering with metadata](https://docs.pinecone.io/docs/metadata-filtering) for more details.
================================================
FILE: airbyte_s3_pinecone_rag/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
.vscode
================================================
FILE: airbyte_s3_pinecone_rag/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: airbyte_s3_pinecone_rag/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: "dbt_project"
version: "1.0.0"
config-version: 2

# This setting configures which "profile" dbt uses for this project.
# Must match the top-level key in profiles.yml.
profile: "dbt_project"

# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]

clean-targets: # directories to be removed by `dbt clean`
  - "target"
  - "dbt_packages"

# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models

# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
  dbt_project:
    # NOTE(review): this scopes the config to a models/example/ subfolder, but
    # this project's models live under models/marts and models/staging — confirm
    # whether this should target those folders instead (models default to views
    # anyway, so the project still builds either way).
    example:
      +materialized: view
================================================
FILE: airbyte_s3_pinecone_rag/dbt_project/models/marts/purchase_data.sql
================================================
/* As example csv data is typically flat, derive purchase patterns with a
   basic SQL query, replacing NULLs with sentinel values.

   Fix: the outer SELECT referenced `description`, but the CTE never projected
   it, which fails BigQuery compilation — `description` is now carried through
   the CTE.
   NOTE(review): `category`, `brand`, and `description` are not selected by
   stg_purchases as currently written — confirm the staging model exposes
   these columns. */
WITH purchase_data AS (
    SELECT
        user_id,
        COALESCE(category, 'unknown') AS product_category,
        COALESCE(brand, 'unknown') AS brand,
        purchased_at,
        added_to_cart_at,
        -- Seconds between adding to cart and completing the purchase.
        TIMESTAMP_DIFF(purchased_at, added_to_cart_at, SECOND) AS time_to_purchase_seconds,
        returned_at,
        description  -- was missing: referenced by the outer SELECT below
    FROM {{ ref('stg_purchases') }}
)

SELECT
    user_id,
    product_category,
    brand,
    time_to_purchase_seconds,
    COALESCE(description, '') AS product_description
FROM purchase_data
================================================
FILE: airbyte_s3_pinecone_rag/dbt_project/models/sources/s3.source.yml
================================================
version: 2

# Declares the raw table Airbyte lands in BigQuery so dbt models can reference
# it via {{ source(...) }}. Project and dataset come from the .env file.
sources:
  - name: s3_raw
    database: "{{ env_var('BIGQUERY_PROJECT_ID') }}"  # GCP project id
    schema: "{{ env_var('BIGQUERY_DATASET_ID') }}"    # raw-data dataset
    tables:
      - name: s3_csv
        description: "Purchase data gathered from csv"
        columns:
          - name: id
            description: "Unique identifier for the purchase."
          - name: updated_at
          - name: purchased_at
          - name: user_id
          - name: returned_at
          - name: product_id
          - name: created_at
          - name: added_to_cart_at
          # Airbyte bookkeeping columns added to every synced row.
          - name: _airbyte_extracted_at
          - name: _airbyte_raw_id
          - name: _airbyte_meta
================================================
FILE: airbyte_s3_pinecone_rag/dbt_project/models/staging/stg_purchases.sql
================================================
-- models/stg_purchases.sql
-- Staging view over the raw purchase rows Airbyte lands in BigQuery.
-- Fix: source() must match the source declared in models/sources/s3.source.yml
-- (name: s3_raw, table: s3_csv); the previous ('s3', 'purchases') pair is not
-- declared anywhere in this project and fails dbt compilation.
select
    id,
    user_id,
    product_id,
    updated_at,
    purchased_at,
    returned_at,
    created_at,
    added_to_cart_at,
    _airbyte_extracted_at  -- trailing comma before FROM removed for portability
from {{ source('s3_raw', 's3_csv') }}
================================================
FILE: airbyte_s3_pinecone_rag/dbt_project/profiles.yml
================================================
# dbt connection profile for this project; referenced by dbt_project.yml
# (`profile: dbt_project`). All values come from environment variables (see
# .env.template), so no credentials are committed to the repository.
dbt_project:
  outputs:
    dev:
      type: bigquery
      project: "{{ env_var('BIGQUERY_PROJECT_ID') }}"
      dataset: "{{ env_var('BIGQUERY_DATASET_ID') }}"
      location: "{{ env_var('BIGQUERY_DATASET_LOCATION') }}"
      # Path to the dbt service-account JSON key generated in the README's
      # BigQuery setup section.
      keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH') }}"
      method: service-account
      priority: interactive
      job_execution_timeout_seconds: 300
      job_retries: 1
      threads: 1
  target: dev
================================================
FILE: airbyte_s3_pinecone_rag/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: airbyte_s3_pinecone_rag/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.4"
constraints = "0.3.4"
hashes = [
"h1:E97NK92naRr/9iAtDxA1PJ1aQYWR/vqWN10ThuQjUn8=",
"zh:02167e00f7e89b6f09ae8796b9ee0ac2d8702b5cb295cb27d7a79266ffafe196",
"zh:1ddad39354af090e830caf1e5cce845f24ff0bcef61b73e77ebc7703c2ecf90d",
"zh:223a0a46d354ad0709d5f28d60accb3448ba5f256b84438238fb05235d1e5b34",
"zh:29efd8848b9560456ec3d90f54984670e9d5b7e36f1edd2adb15c5fec3f57166",
"zh:33d31310ba7ec699b5bd64edbb63b0a89bd55d87fae0f55409bbfa5fd7dd4d90",
"zh:35ed0e2894e28ec7762406a18510b789b76b0649ace309eec22acaf10c982f08",
"zh:4ba860918b65c00cc596d0b5b40068b89a72a300604a62bca7d286073779e684",
"zh:59a0d1128477e587d9dac71f93598bae6050d176d29c840b6ad1bf95529d61e8",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8d9bb37e9094eba02acf8d08cf9f3331cd7c26478441d70e74e8d1ec9cb33aaa",
"zh:9e5243eac43950889781a88d4e6186aea240898045e0e3c8fffd3291c5e74b6f",
"zh:a0c31a5bc0cbc4a7341a0d185806a1c6797508580bede71a5009ad7b078d68c2",
"zh:af341259999c6639a1c27e8f116a40b088dd192a3057096dc23a42affc97113f",
"zh:b9779f8f695b4fab56e062abab61eaa58853f20c6411d53b2bd82a66d79a8b49",
"zh:e284d898e5a30e507f1292635542dafe0e95ea8a5a215103a9d96d699aed9e75",
]
}
================================================
FILE: airbyte_s3_pinecone_rag/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs

locals {
  // The BigQuery destination expects a JSON credentials file, but the source expects a JSON string.
  // Read JSON file into a string variable, so we can use the same contents also for the source connector.
  bigquery_credentials_json = replace(
    file(var.bigquery_credentials_json_file_path), "\n", ""
  ) // <- If this fails, please double check that the file exists.

  // Convenience URL surfaced by output.tf; "(n/a)" for OSS deployments
  // without a workspace id.
  airbyte_cloud_url = var.airbyte_workspace_id != null ? "https://cloud.airbyte.com/workspaces/${var.airbyte_workspace_id}/connections" : "(n/a)"
}

// Sources

// BigQuery acts as a *source* here so the dbt-transformed view can be
// published onward to Pinecone.
resource "airbyte_source_bigquery" "bigquery" {
  configuration = {
    credentials_json = local.bigquery_credentials_json
    project_id       = var.bigquery_project_id
    dataset_id       = var.bigquery_dataset_id
    source_type      = "bigquery"
  }
  name         = "BigQuery Publishing Source"
  workspace_id = var.airbyte_workspace_id
}

// S3 source for the raw CSV files.
// NOTE(review): bucket/credentials/endpoint appear both at the top level and
// inside the nested `provider` block — confirm which of the two this provider
// version (0.3.4) actually reads; the duplication looks accidental.
resource "airbyte_source_s3" "s3" {
  configuration = {
    source_type           = "s3"
    bucket                = var.bucket
    aws_access_key_id     = var.aws_access_key_id
    aws_secret_access_key = var.aws_secret_access_key
    endpoint              = "https://s3.amazonaws.com"
    path_pattern          = "**" // match every object under the prefix
    format = {
      csv_format = {
        ignore_errors_on_fields_mismatch = true
      }
    }
    provider = {
      aws_access_key_id     = var.aws_access_key_id
      aws_secret_access_key = var.aws_secret_access_key
      bucket                = var.bucket #Change to your need
      endpoint              = "https://s3.amazonaws.com" #Change to your need
      path_prefix           = "/Wallpapers/" #Change to your need
      region_name           = "us-west-2" #Change to your need
      start_date            = "2021-01-01T00:00:00Z" #Change to your need
    }
    region_name = "us-west-2"
    // One stream per logical dataset; "Wallpapers" is a sample name.
    streams = [
      {
        name      = "Wallpapers"
        file_type = "csv"
        format = {
          csv_format = {
            filetype = "csv"
          }
        }
      },
    ]
  }
  name         = "usgs-landsat" #Change to your need
  workspace_id = var.airbyte_workspace_id
}

// Destinations

// Raw-data landing zone for the S3 sync.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.bigquery_dataset_id
    dataset_location = var.bigquery_dataset_location
    destination_type = "bigquery"
    project_id       = var.bigquery_project_id
    credentials_json = local.bigquery_credentials_json
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery Raw Data Destination"
  workspace_id = var.airbyte_workspace_id
}

// Pinecone vector index; OpenAI produces the embeddings.
resource "airbyte_destination_pinecone" "pinecone" {
  configuration = {
    destination_type = "pinecone"
    embedding = {
      destination_pinecone_embedding_open_ai = {
        openai_key = var.openai_key
        mode       = "openai"
      }
    }
    indexing = {
      index                = var.pinecone_index
      pinecone_environment = var.pinecone_environment
      pinecone_key         = var.pinecone_key
    }
    processing = {
      // Documents are split into 1024-unit chunks overlapping by 16.
      chunk_overlap = 16
      chunk_size    = 1024
      // NOTE(review): these field names must exist in the published stream —
      // confirm the BigQuery view exposes url / last_edited_time / s3_text.
      metadata_fields = ["url", "last_edited_time"]
      text_fields     = ["s3_text"]
    }
  }
  name         = "Pinecone Publish Destination"
  workspace_id = var.airbyte_workspace_id
}

// Connections

// S3 -> BigQuery raw sync; synced tables land in the raw dataset with an
// "s3_" prefix.
resource "airbyte_connection" "s3_connection" {
  name                 = "S3 to BigQuery"
  source_id            = airbyte_source_s3.s3.source_id
  destination_id       = airbyte_destination_bigquery.bigquery.destination_id
  namespace_definition = "custom_format"
  namespace_format     = var.bigquery_dataset_id
  // NOTE(review): `format` is not a documented airbyte_connection argument —
  // confirm this provider version accepts it, or remove it.
  format = {
    csv = {}
  }
  prefix = "s3_"
  configurations = {
    streams = [
      {
        name      = "Wallpapers"
        file_type = "csv"
        format = {
          csv_format = {
            filetype = "csv"
          }
        }
      },
    ]
  }
}

// BigQuery -> Pinecone publish; incremental dedupe keyed on `url`, cursored
// on `last_edited_time`.
resource "airbyte_connection" "bigquery_to_pinecone" {
  name           = "Publish BigQuery Data to Pinecone"
  source_id      = airbyte_source_bigquery.bigquery.source_id
  destination_id = airbyte_destination_pinecone.pinecone.destination_id
  configurations = {
    streams = [
      {
        name         = "s3_data",
        cursor_field = ["last_edited_time"],
        primary_key  = [["url"]]
        sync_mode    = "incremental_deduped_history"
      }
    ]
  }
}
================================================
FILE: airbyte_s3_pinecone_rag/infra/airbyte/output.tf
================================================
// These variables will be printed at the end of a successful `terraform apply`,
// or by running `terraform output` at any time.
output "airbyte_cloud_url" {
value = local.airbyte_cloud_url
}
================================================
FILE: airbyte_s3_pinecone_rag/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
required_providers {
airbyte = {
source = "airbytehq/airbyte"
version = "0.3.4"
}
}
}
/////////////////////////////////
// Airbyte Provider Definition //
/////////////////////////////////
// Uncomment the OSS or Cloud block, depending on your desired deployment location:
// Airbyte Cloud:
provider "airbyte" {
bearer_auth = var.airbyte_cloud_auth_key
}
# // Airbyte OSS:
# provider "airbyte" {
# // Optionally override the airbyte-api-server URL
# server_url = "http://localhost:8006/v1"
# // Optionally override the default password/username below
# username = "airbyte"
# password = "password"
# }
================================================
FILE: airbyte_s3_pinecone_rag/infra/airbyte/variables.tf
================================================
// Airbyte
variable "airbyte_workspace_id" {
description = < What do you want to know?"))
# Interactive REPL loop: read a question, run it through the QA chain, and
# render the answer as Markdown.
# NOTE(review): `console` (a rich Console), `Markdown` (rich.markdown.Markdown),
# and `qa` (presumably a LangChain RetrievalQA chain over the Pinecone index)
# are defined earlier in query.py — that part of the file is not visible here;
# confirm their exact types before relying on this note.
console.print("")
while True:
    try:
        query = input("")
    except KeyboardInterrupt:
        # Ctrl-C exits the session cleanly instead of dumping a traceback.
        console.print("\n")
        console.print(Markdown("_Goodbye!_ 👋"))
        exit(0)
    answer = qa.run(query)
    console.print(Markdown(answer))
    console.print(Markdown("\n------\n> What else do you want to know?\n"))
    console.print("\n")
================================================
FILE: airbyte_s3_pinecone_rag/quickstart.md
================================================
# Quick Start
```bash
# Change to the quickstart directory
cd airbyte_s3_pinecone_rag
# Create a new `.env` file from the template
cp .env.template .env
# Edit the `.env` file and provide all needed variables
code .env
# Export the .env file variables
set -o allexport && source .env && set +o allexport
# Deploy terraform
terraform -chdir=infra/airbyte init
terraform -chdir=infra/airbyte apply
# Print Airbyte Cloud workspace URL
terraform -chdir=infra/airbyte output
# In Airbyte Cloud:
# 1. Perform any needed fine-tuning or debugging.
# 2. Run the source data connection at least once.
# Build dbt models
dbt run --project-dir=dbt_project --profiles-dir=dbt_project
# In Airbyte Cloud:
# 1. Create the publish connection if needed.
# 2. Run the publish connection at least once.
# Create and activate the virtual environment for the AI chatbot
python -m venv .venv
source .venv/bin/activate
# Run the chatbot
./query.py
```
Please see the [Readme](README.md) for more detailed instructions.
================================================
FILE: airbyte_s3_pinecone_rag/setup.py
================================================
"""Package definition for the S3 -> BigQuery -> Pinecone RAG quickstart."""
from setuptools import find_packages, setup

# Runtime dependencies: dbt for the BigQuery transforms, plus the vector-store
# and LLM client libraries used by query.py.
REQUIREMENTS = [
    "dbt-bigquery",
    "pinecone-client",
    "langchain",
    # Pinned: later openai releases changed the client API — confirm
    # compatibility before upgrading.
    "openai==0.28.1",
    "tiktoken",
    "rich",
]

setup(
    name="pyairbyte_s3_pinecone_rag",
    packages=find_packages(),
    install_requires=REQUIREMENTS,
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: api_to_warehouse/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: api_to_warehouse/Readme.md
================================================
# API to Warehouse Stack
Welcome to the "API to Warehouse Stack" repository! This repository offers a simple template to help you get data from various APIs and put it into your data warehouse for further analysis using Airbyte. You can use several supported APIs as your data sources for this process. Here are a few examples of the APIs you can set up to extract data using Airbyte.
- [Twitter API](https://docs.airbyte.com/integrations/sources/twitter)
- [US census API](https://docs.airbyte.com/integrations/sources/us-census)
- [Rocket.chat API](https://docs.airbyte.com/integrations/sources/rocket-chat)
- [Recreation.gov API](https://docs.airbyte.com/integrations/sources/recreation)
- [Polygon Stock API](https://docs.airbyte.com/integrations/sources/polygon-stock-api)
- [PokeAPI](https://docs.airbyte.com/integrations/sources/pokeapi)
- Many more
Here are some data warehouses that users can choose as a destination to load the data extracted from APIs.
- [Snowflake](https://docs.airbyte.com/integrations/destinations/snowflake)
- [BigQuery](https://docs.airbyte.com/integrations/destinations/bigquery)
- [Amazon Redshift](https://docs.airbyte.com/integrations/destinations/redshift)
- Many more
In this process, we'll use the Github API to get data and Snowflake as the data warehouse to store the data.
## Table of Contents
- [API to Warehouse Stack](#api-to-warehouse-stack)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Setting an environment for your project](#setting-an-environment-for-your-project)
- There are two ways to set up the Airbyte connectors.
- [1. Using Airbyte UI](#1-using-airbyte-ui)
- [2. Using Terraform to Setup the Connector](#2-using-terraform-to-setup-the-connector)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add api_to_warehouse
```
2. **Navigate to the directory**:
```bash
cd api_to_warehouse
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 1. Using Airbyte UI
To establish the connection and import data from the Github API into the Snowflake warehouse, kindly proceed by utilizing the Airbyte user interface. The following steps should be adhered to:
1. Run the Airbyte OSS version by following the [documentation](https://docs.airbyte.com/quickstart/deploy-airbyte).
2. Setup the Github API as source by following [these steps](https://docs.airbyte.com/integrations/sources/github).
3. Setup the Snowflake as destination by following [these steps](https://docs.airbyte.com/integrations/destinations/snowflake)
4. Please proceed to configure the synchronization time and select the specific tables you wish to load into Snowflake from GitHub. You can make your selection from the list of available streams.
5. Enjoy :smile:, your data loaded into Snowflake data warehouse from Github API.
## 2. Using Terraform to Setup the Connector
Airbyte enables you to make connections between different platforms by creating connectors for sources and destinations. In this project, we're using Terraform to automate the setup of these connectors and their connections. Here's how you can do it:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your GitHub source and Snowflake destination connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
After Terraform finishes its tasks, go to the Airbyte user interface. You will find your source and destination connectors already set up, along with the connection between them, all ready to use.
## Next Steps
After you extract and load data from an API into a data warehouse, you can analyze the data. For example, we used Snowflake data warehouse, which supports analytical tools like [Tableau](https://www.snowflake.com/resource/best-practices-for-using-tableau-with-snowflake/?utm_cta=website-be-trending-snowflake-tableau-ek), [Talend](https://www.snowflake.com/technology-partners/talend/) and [Sigma](https://www.snowflake.com/technology-partners/sigma/).
================================================
FILE: api_to_warehouse/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: api_to_warehouse/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.4"
constraints = "0.3.4"
hashes = [
"h1:0AHJKsRTlX6BCJZCJw5/oHsN97zi1AP33JeuPMwoX6U=",
"zh:02167e00f7e89b6f09ae8796b9ee0ac2d8702b5cb295cb27d7a79266ffafe196",
"zh:1ddad39354af090e830caf1e5cce845f24ff0bcef61b73e77ebc7703c2ecf90d",
"zh:223a0a46d354ad0709d5f28d60accb3448ba5f256b84438238fb05235d1e5b34",
"zh:29efd8848b9560456ec3d90f54984670e9d5b7e36f1edd2adb15c5fec3f57166",
"zh:33d31310ba7ec699b5bd64edbb63b0a89bd55d87fae0f55409bbfa5fd7dd4d90",
"zh:35ed0e2894e28ec7762406a18510b789b76b0649ace309eec22acaf10c982f08",
"zh:4ba860918b65c00cc596d0b5b40068b89a72a300604a62bca7d286073779e684",
"zh:59a0d1128477e587d9dac71f93598bae6050d176d29c840b6ad1bf95529d61e8",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8d9bb37e9094eba02acf8d08cf9f3331cd7c26478441d70e74e8d1ec9cb33aaa",
"zh:9e5243eac43950889781a88d4e6186aea240898045e0e3c8fffd3291c5e74b6f",
"zh:a0c31a5bc0cbc4a7341a0d185806a1c6797508580bede71a5009ad7b078d68c2",
"zh:af341259999c6639a1c27e8f116a40b088dd192a3057096dc23a42affc97113f",
"zh:b9779f8f695b4fab56e062abab61eaa58853f20c6411d53b2bd82a66d79a8b49",
"zh:e284d898e5a30e507f1292635542dafe0e95ea8a5a215103a9d96d699aed9e75",
]
}
================================================
FILE: api_to_warehouse/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources - Documentation of Source : https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs/resources/source_github
// GitHub source connector. The empty strings below are placeholders that
// must be filled in before running `terraform apply`.
resource "airbyte_source_github" "my_source_github" {
configuration = {
# branch = ""
credentials = {
source_github_authentication_personal_access_token = {
// Log into GitHub and then generate a personal access token. To load balance your API quota consumption across multiple API tokens, input multiple tokens separated with ","
personal_access_token = ""
}
}
// GitHub repository (or repositories) to replicate data from.
repository = ""
# requests_per_hour = 10
source_type = "github"
// Enter the date
start_date = ""
}
name = "Github API"
# secret_id = "...my_secret_id..."
// Workspace that owns this connector; supplied via variables.tf.
workspace_id = var.workspace_id
}
// Destinations - Documentation of Destination: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs/resources/destination_snowflake
// Snowflake destination connector. Provide the credentials of the Snowflake
// account that should receive the synced GitHub data.
resource "airbyte_destination_snowflake" "my_destination_snowflake" {
configuration = {
credentials = {
destination_snowflake_authorization_method_username_and_password = {
password = ""
}
}
database = ""
destination_type = "snowflake"
host = ""
# jdbc_url_params = "...my_jdbc_url_params..."
# raw_data_schema = "...my_raw_data_schema..."
role = ""
schema = ""
username = ""
warehouse = ""
}
name = "Snowflake Warehouse"
workspace_id = var.workspace_id
}
// Connections
// Connection wiring the GitHub source to the Snowflake destination.
// Only the streams listed under `streams` are synced.
resource "airbyte_connection" "github_to_snowflake" {
name = "Github API to Snowflake Data Warehouse"
source_id = airbyte_source_github.my_source_github.source_id
destination_id = airbyte_destination_snowflake.my_destination_snowflake.destination_id
status = "active"
configurations = {
// Available Streams = Comments, Commit comment reactions, Commit comments,
# Commits, Deployments, Events, Issue comment reactions, Issue events, Issue milestones,
# Issue reactions, Issues, Project cards, Project columns, Projects, Pull request comment reactions,
# Pull requests, Pull request stats, Releases, Review comments, Reviews, Stargazers, Workflow runs,
# Workflows
streams = [
{
name = "projects"
},
{
name = "commits"
},
]
// Every sync re-reads the source in full and overwrites the destination tables.
sync_mode = "full_refresh_overwrite"
}
schedule = {
// NOTE(review): cron_expression is left empty here — set a cron string (or
// switch schedule_type to "manual") before applying; confirm the accepted
// format against the provider docs.
cron_expression = ""
schedule_type = "cron"
}
}
================================================
FILE: api_to_warehouse/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte provider so `terraform init` resolves a known-good version.
terraform {
required_providers {
airbyte = {
source = "airbytehq/airbyte"
version = "0.3.4"
}
}
}
// Provider configuration targeting a local Airbyte OSS deployment.
// The username/password below are the Airbyte OSS defaults — override them
// if you changed the airbyte-proxy credentials.
provider "airbyte" {
// If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
// include the actual password/username you've set up (or use the defaults below)
username = "airbyte"
password = "password"
// if running locally (Airbyte OSS), include the server url to the airbyte-api-server
server_url = "http://localhost:8006/v1"
}
================================================
FILE: api_to_warehouse/infra/airbyte/variables.tf
================================================
// ID of the Airbyte workspace in which the source, destination, and
// connection resources are created (referenced as var.workspace_id in main.tf).
variable "workspace_id" {
type = string
}
================================================
FILE: api_to_warehouse/setup.py
================================================
"""Packaging configuration for the api_to_warehouse quickstart.

Install in editable mode with the dev extras via:
    pip install -e ".[dev]"
"""
from setuptools import find_packages, setup

# Development-only dependencies, exposed as the "dev" extra.
DEV_REQUIREMENTS = ["pytest"]

setup(
    name="api-to-warehouse",
    packages=find_packages(),
    # This quickstart has no runtime dependencies of its own.
    install_requires=[],
    extras_require={"dev": DEV_REQUIREMENTS},
)
================================================
FILE: customer_segmentation_analytics_shopify/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: customer_segmentation_analytics_shopify/README.md
================================================
# Customer Segmentation Analytics Stack With Shopify, Airbyte, Dbt, Dagster and BigQuery
Welcome to the "Customer Segmentation Analytics Stack" repository! ✨ This is your go-to place to easily set up a data stack using Shopify, Airbyte, Dbt, BigQuery, and Dagster. With this setup, you can pull Shopify data, extract it using Airbyte, put it into BigQuery, and play around with it using dbt and Dagster.
This Quickstart is all about making things easy, getting you started quickly and showing you how smoothly all these tools can work together!
Below is a visual representation of how data flows through our integrated tools in this Quickstart. This comes from Dagster's global asset lineage view:
.svg>)
## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors with Terraform](#3-setting-up-airbyte-connectors-with-terraform)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#5-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add customer_segmentation_analytics_shopify
```
2. **Navigate to the directory**:
```bash
cd customer_segmentation_analytics_shopify
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your BigQuery connection. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset.
6. **Visualise the Data (optional)**:
This is an optional step to visualise the data. We will be using Python and matplotlib, but you can use any tools of your choice. First, we need to install the necessary dependencies, which we can do with the following command.
```bash
pip install google-cloud-bigquery matplotlib
```
Now create a folder named "analyses" under the dbt_project directory. Make sure to name the folder exactly the same as you've mentioned in the `dbt_project.yml` file otherwise it will throw error. Next, create python file under the "analyses" folder with appropriate name like `customer_activity_analysis.py`. Now write down your python script for the analysis. Make sure to set your BigQuery service account json file path as environment variables and use it to authenticate with BigQuery.
Now after you are done writing your python script go to "analyses" folder.
```bash
cd analyses
```
Now run the following command to run the python file. Make sure to replace `customer_activity_analysis.py` with your actual file name.
```bash
python customer_activity_analysis.py
```
You should then see a window displaying a beautiful chart.
## 5. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
5. **Materialize Dagster Assets**:
In the Dagster UI, click on "Materialize all". This should trigger the full pipeline. First the Airbyte sync to extract data from Shopify and load it into BigQuery, and then dbt to transform the raw data, materializing the `staging` and `marts` models.
## Next Steps
Congratulations on deploying and running the Customer Segmentation Analytics Quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Dive into the datasets in BigQuery, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Automate and Monitor Your Pipelines**
- Explore more advanced Dagster configurations and setups to automate your pipelines further and set up monitoring and alerting to be informed of any issues immediately.
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
.user.yml
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/analyses/customer_activity_analysis.py
================================================
"""Segment customers by activity level and visualize the distribution.

Queries the dbt-transformed ``stg_customers`` table in BigQuery, buckets
each customer into an activity tier based on ``orders_count``, and renders
a bar chart of how many customers fall into each tier.
"""
import os
from collections import Counter

from google.cloud import bigquery
from matplotlib import pyplot as plt

# Path to your BigQuery service account key (set DBT_BIGQUERY_KEYFILE_PATH
# in your environment before running this script).
service_account_key_path = os.environ.get('DBT_BIGQUERY_KEYFILE_PATH')
# Initialize the BigQuery client
client = bigquery.Client.from_service_account_json(service_account_key_path)
# SQL query to categorize customers based on their activity level
query = """
WITH customer_activity AS (
SELECT
id,
orders_count,
CASE
WHEN orders_count >= 10 THEN 'Highly Active'
WHEN orders_count >= 5 THEN 'Moderately Active'
ELSE 'Low Activity'
END AS activity_level
FROM transformed_data.stg_customers
)
SELECT
id,
orders_count,
activity_level
FROM customer_activity
"""
# Run the query
query_job = client.query(query)
# Get the results
results = query_job.result()

# Tally how many customers fall into each activity tier. Bug fix: the
# previous version plotted each customer's raw orders_count against a
# repeated categorical x value, which drew overlapping bars whose visible
# height was an arbitrary single customer's count — while the y-axis was
# labeled 'Number of Customers'. Counting per tier matches the label.
activity_counts = Counter(row.activity_level for row in results)
levels = list(activity_counts.keys())
customer_counts = [activity_counts[level] for level in levels]

# Create a bar chart to visualize the data
plt.figure(figsize=(10, 6))
plt.bar(levels, customer_counts, width=0.6, align='center', alpha=0.7)
plt.xlabel('Activity Level')
plt.ylabel('Number of Customers')
plt.title('Customer Activity Level Segmentation')
plt.grid(axis='y', linestyle='--', alpha=0.6)
plt.show()
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/analyses/purchase_pattern_segmentation_analysis.py
================================================
"""Segment customers by purchase behavior and visualize the distribution.

Queries the dbt-transformed ``stg_customers`` table in BigQuery, assigns
each customer a purchase-behavior segment in SQL, and renders a bar chart
of how many customers fall into each segment.
"""
import os
from collections import Counter

from google.cloud import bigquery
from matplotlib import pyplot as plt

# Path to your BigQuery service account key (set DBT_BIGQUERY_KEYFILE_PATH
# in your environment before running this script).
service_account_key_path = os.environ.get('DBT_BIGQUERY_KEYFILE_PATH')
# Initialize the BigQuery client
client = bigquery.Client.from_service_account_json(service_account_key_path)
# SQL query to categorize customers based on their purchase behavior
query = """
WITH purchase_data AS (
SELECT
id,
COUNT(*) AS total_purchases,
SUM(total_spent) AS total_spent
FROM transformed_data.stg_customers
GROUP BY id
),
behavioral_segments AS (
SELECT
id,
CASE
WHEN total_purchases >= 5 AND total_spent >= 500 THEN 'High-Value Shoppers'
WHEN total_purchases < 5 AND total_spent < 100 THEN 'Low-Value Shoppers'
ELSE 'Regular Shoppers'
END AS purchase_segment
FROM purchase_data
)
SELECT * FROM behavioral_segments
"""
# Run the query
query_job = client.query(query)
# Get the results
results = query_job.result()

# Count the number of customers in each segment in a single O(n) pass.
# (Previously this used list.count() once per distinct segment, and also
# accumulated an unused customer_ids list.)
segment_counts = Counter(row.purchase_segment for row in results)

# Guard the empty case: zip(*{}) would raise an opaque ValueError.
if not segment_counts:
    raise SystemExit('Query returned no rows; nothing to plot.')

# Create a bar chart to visualize the data
segments, counts = zip(*segment_counts.items())
plt.figure(figsize=(10, 6))
plt.bar(segments, counts, width=0.6, align='center', alpha=0.7)
plt.xlabel('Purchase Behavior Segment')
plt.ylabel('Number of Customers')
plt.title('Customer Purchase Behavior Segmentation')
plt.grid(axis='y', linestyle='--', alpha=0.6)
plt.show()
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/analyses/rfm_segmentation_analysis.py
================================================
from google.cloud import bigquery
import os
from collections import Counter
from matplotlib import pyplot as plt

# Path to your BigQuery service account key (read from the environment to
# avoid hard-coding credentials).
service_account_key_path = os.environ.get('DBT_BIGQUERY_KEYFILE_PATH')

# Initialize the BigQuery client
client = bigquery.Client.from_service_account_json(service_account_key_path)

# SQL query to categorize customers based on RFM segments
query = """
WITH rfm_data AS (
    SELECT
        user_id,
        MAX(created_at) AS last_purchase_date,
        COUNT(DISTINCT order_id) AS total_orders,
        SUM(amount) AS total_spent
    FROM transformed_data.stg_transactions
    GROUP BY user_id
),
rfm_segments AS (
    SELECT
        user_id,
        CASE
            WHEN TIMESTAMP_DIFF(CAST(CURRENT_DATE AS TIMESTAMP), last_purchase_date, DAY) <= 30 THEN 'Active'
            WHEN total_orders >= 5 THEN 'Loyal'
            WHEN total_spent >= 500 THEN 'High-Spending'
            ELSE 'Churned'
        END AS rfm_segment
    FROM rfm_data
)
SELECT * FROM rfm_segments
"""

# Run the query and wait for the results.
query_job = client.query(query)
results = query_job.result()

# Extract the data
user_ids = []
rfm_segments = []
for row in results:
    user_ids.append(row.user_id)
    rfm_segments.append(row.rfm_segment)

# Count the number of customers in each segment. Counter does this in a
# single O(n) pass, unlike the previous dict comprehension which called
# list.count() once per distinct segment (O(n * k)).
segment_counts = Counter(rfm_segments)

# Create a bar chart to visualize the data (guard against an empty result
# set, where zip(*...) would raise).
segments, counts = zip(*segment_counts.items()) if segment_counts else ((), ())
plt.figure(figsize=(10, 6))
plt.bar(segments, counts, width=0.6, align='center', alpha=0.7)
plt.xlabel('RFM Segment')
plt.ylabel('Number of Customers')
plt.title('Customer RFM Segmentation')
plt.grid(axis='y', linestyle='--', alpha=0.6)
plt.show()
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
staging:
+materialized: view
marts:
+materialized: view
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/models/marts/customer_activity.sql
================================================
-- Mart model: segments customers into activity levels by order count.
-- Fix: reference the staging model via ref() instead of the hard-coded
-- `transformed_data.stg_customers`, so dbt tracks the dependency and the
-- model works across target schemas/environments.
WITH customer_activity AS (
    SELECT
        id,
        orders_count,
        CASE
            WHEN orders_count >= 10 THEN 'Highly Active'
            WHEN orders_count >= 5 THEN 'Moderately Active'
            ELSE 'Low Activity'
        END AS activity_level
    FROM {{ ref('stg_customers') }}
)
SELECT
    id,
    orders_count,
    activity_level
FROM customer_activity
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/models/marts/purchase_pattern_segmentation.sql
================================================
-- Mart model: segments customers by purchase count and total spend.
-- Fix: reference the staging model via ref() instead of the hard-coded
-- `transformed_data.stg_customers`, so dbt tracks the dependency and the
-- model works across target schemas/environments.
with purchase_data as (
    select
        id,
        count(*) as total_purchases,
        sum(total_spent) as total_spent
    from {{ ref('stg_customers') }}
    group by id
),
behavioral_segments as (
    select
        id,
        case
            when total_purchases >= 5 and total_spent >= 500 then 'High-Value Shoppers'
            when total_purchases < 5 and total_spent < 100 then 'Low-Value Shoppers'
            else 'Regular Shoppers'
        end as purchase_segment
    from purchase_data
)
select * from behavioral_segments
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/models/marts/rfm_segmentation.sql
================================================
-- Mart model: assigns each user an RFM-style segment from their
-- transaction recency, frequency, and monetary totals.
-- Fix: reference the staging model via ref() instead of the hard-coded
-- `transformed_data.stg_transactions`, so dbt tracks the dependency and the
-- model works across target schemas/environments.
with rfm_data as (
    select
        user_id,
        max(created_at) as last_purchase_date,
        count(distinct order_id) as total_orders,
        sum(amount) as total_spent
    from {{ ref('stg_transactions') }}
    group by user_id
),
rfm_segments as (
    select
        user_id,
        case
            when TIMESTAMP_DIFF(CAST(current_date AS TIMESTAMP), last_purchase_date, DAY) <= 30 then 'Active'
            when total_orders >= 5 then 'Loyal'
            when total_spent >= 500 then 'High-Spending'
            else 'Churned'
        end as rfm_segment
    from rfm_data
)
select * from rfm_segments
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/models/sources/shopify_sources.yml
================================================
version: 2
sources:
- name: shopify
# Use your BigQuery project ID
database: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
# Use your BigQuery dataset name
schema: shopify_airbyte
tables:
- name: customers
description: "Simulated customers data from the Shopify connector."
columns:
- name: id
description: "Unique identifier for the customers."
- name: accepts_marketing
- name: accepts_marketing_updated_at
- name: addresses
- name: admin_graphql_api_id
- name: created_at
- name: currency
- name: default_address
- name: email
- name: email_marketing_consent
- name: first_name
- name: last_name
- name: last_order_id
- name: last_order_name
- name: marketing_opt_in_level
- name: multipass_identifier
- name: note
- name: orders_count
- name: phone
- name: shop_url
- name: sms_marketing_consent
- name: state
- name: tags
- name: tax_exempt
- name: tax_exemptions
- name: total_spent
- name: updated_at
- name: verified_email
- name: transactions
description: "Simulated transactions data from the Shopify connector."
columns:
- name: id
description: "Unique identifier for the transactions."
- name: admin_graphql_api_id
- name: amount
- name: authorization
- name: created_at
- name: currency
- name: device_id
- name: error_code
- name: gateway
- name: kind
- name: location_id
- name: message
- name: order_id
- name: parent_id
- name: payment_details
- name: payment_id
- name: processed_at
- name: receipt
- name: shop_url
- name: source_name
- name: status
- name: test
- name: total_unsettled_set
- name: user_id
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/models/staging/stg_customers.sql
================================================
-- Staging model: 1:1 passthrough of the raw Shopify `customers` table
-- loaded by Airbyte (see models/sources/shopify_sources.yml).
select
    *
from {{ source('shopify', 'customers') }}
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/models/staging/stg_transactions.sql
================================================
-- Staging model: 1:1 passthrough of the raw Shopify `transactions` table
-- loaded by Airbyte (see models/sources/shopify_sources.yml).
select
    *
from {{ source('shopify', 'transactions') }}
================================================
FILE: customer_segmentation_analytics_shopify/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
dataset: transformed_data
job_execution_timeout_seconds: 300
job_retries: 1
# Use an env variable to indicate your JSON key file path
keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
location: US
method: service-account
priority: interactive
# Indicate your BigQuery project ID
project: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
threads: 1
type: bigquery
target: dev
================================================
FILE: customer_segmentation_analytics_shopify/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: customer_segmentation_analytics_shopify/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:0LmuAc5LvlMuOUPtNEaCAh9FHrV/C877bDJhm9Lz8MU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: customer_segmentation_analytics_shopify/infra/airbyte/main.tf
================================================
// Source: Shopify connector authenticated with an admin API password.
// All sensitive values come from variables (populated via terraform.tfvars
// or environment), never hard-coded here.
resource "airbyte_source_shopify" "my_source_shopify" {
  configuration = {
    credentials = {
      source_shopify_shopify_authorization_method_api_password = {
        api_password = var.api_password
        auth_method  = "api_password"
      }
    }
    shop        = var.shop
    source_type = "shopify"
  }
  name         = "Shopify"
  workspace_id = var.workspace_id
}

// Destinations: BigQuery dataset that receives the raw synced tables.
// Standard inserts are used for loading (no GCS staging bucket required).
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.dataset_id
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    credentials_json = var.credentials_json
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}

// Connections: sync only the `customers` and `transactions` streams, which
// are the two tables the dbt staging models consume.
resource "airbyte_connection" "shopify_bigquery" {
  name           = "Shopify to BigQuery"
  source_id      = airbyte_source_shopify.my_source_shopify.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [
      {
        name = "customers"
      },
      {
        name = "transactions"
      }
    ]
  }
}
================================================
FILE: customer_segmentation_analytics_shopify/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.3"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  // NOTE(review): these are the Airbyte OSS defaults for a local quickstart —
  // move real credentials into variables/environment before any shared use.
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1/"
}
================================================
FILE: customer_segmentation_analytics_shopify/infra/airbyte/variables.tf
================================================
# Shopify admin API password used by the Airbyte source (sensitive;
# supply via terraform.tfvars or TF_VAR_api_password).
variable "api_password" {
  type = string
}

# Airbyte workspace ID that will own the source, destination, and connection.
variable "workspace_id" {
  type = string
}

# BigQuery dataset that receives the raw synced tables.
variable "dataset_id" {
  type = string
}

# BigQuery (GCP) project ID hosting the destination dataset.
variable "project_id" {
  type = string
}

# Service-account key JSON for the BigQuery destination (sensitive).
variable "credentials_json" {
  type = string
}

# Shopify store name, as expected by the Airbyte Shopify source.
variable "shop" {
  type = string
}
================================================
FILE: customer_segmentation_analytics_shopify/orchestration/orchestration/__init__.py
================================================
================================================
FILE: customer_segmentation_analytics_shopify/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path
@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize the dbt project's models as Dagster assets.

    Runs `dbt build` via the CLI resource and streams its events back to
    Dagster so per-model progress and results surface as they happen.
    """
    yield from dbt.cli(["build"], context=context).stream()
# Local Airbyte OSS instance (docker-compose deployment).
# NOTE(review): host/port/credentials are hard-coded to the OSS defaults —
# consider reading them from environment variables for non-local use.
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password:
    username="airbyte",
    password="password"
)
# Introspect the Airbyte instance and expose its connections' synced
# tables as Dagster assets, upstream of the dbt assets above.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance,)
================================================
FILE: customer_segmentation_analytics_shopify/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource

# Absolute path to the dbt project that sits next to this orchestration
# package (orchestration/orchestration/ -> ../../dbt_project).
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
# CLI resource used below to parse the project when a manifest is needed.
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: customer_segmentation_analytics_shopify/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource
from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# Dagster code-location entry point (referenced from pyproject.toml):
# wires the Airbyte and dbt assets together with their schedules and the
# dbt CLI resource required by dbt_project_dbt_assets.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: customer_segmentation_analytics_shopify/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
# These imports are only exercised once the schedule below is uncommented;
# they are kept so the template works out of the box.
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets

# List of Dagster schedules consumed by Definitions in definitions.py.
# Empty by default — uncomment the entry below for a daily midnight run.
schedules = [
    # build_schedule_from_dbt_selection(
    #     [dbt_project_dbt_assets],
    #     job_name="materialize_dbt_models",
    #     cron_schedule="0 0 * * *",
    #     dbt_select="fqn:*",
    # ),
]
================================================
FILE: customer_segmentation_analytics_shopify/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: customer_segmentation_analytics_shopify/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Packaging for the Dagster orchestration code location; install with
# `pip install -e ".[dev]"` to also get the local Dagster webserver.
setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dbt-core>=1.4.0",
        "dbt-bigquery",
    ],
    extras_require={
        "dev": [
            "dagster-webserver",
        ]
    },
)
================================================
FILE: customer_segmentation_analytics_shopify/setup.py
================================================
from setuptools import find_packages, setup

# Top-level packaging for the quickstart: pulls in dbt (BigQuery adapter)
# plus the Dagster integrations used by the orchestration package.
setup(
    name="airbyte-dbt-dagster",
    packages=find_packages(),
    install_requires=[
        "dbt-bigquery",
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dagster-airbyte",
    ],
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: data_to_pinecone_llm/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: data_to_pinecone_llm/.vscode/quickstart.code-workspace
================================================
{
"folders": [
{
"path": ".."
},
{
"path": "../.."
}
],
"settings": {}
}
================================================
FILE: data_to_pinecone_llm/README.md
================================================
# Data-to-Pinecone Integration
Welcome to the "Data-to-Pinecone Integration" repository! This repo provides a quickstart template for building a full data stack using Airbyte, Terraform, and dbt to move data from Notion -> BigQuery -> Pinecone for interacting with Notion data through an LLM.
This quickstart is designed to minimize setup hassles and propel you forward.

## Table of Contents
- [Prerequisites](#prerequisites)
- [Creating an Environment For Your Project](#1-creating-an-environment-for-your-project)
- [Adding Configuration Values](#2-adding-configuration-values)
- [Setting Up Airbyte Connectors](#3-setting-up-airbyte-connectors)
- [Sync Notion Data into BigQuery](#4-sync-notion-data-into-bigquery)
- [Setting Up the dbt Project](#5-setting-up-the-dbt-project)
- [Publishing Into Pinecone](#6-publishing-into-pinecone)
- [Asking Questions About Your Data](#7-asking-questions-about-your-data)
- [Next Steps](#8-next-steps)
## Prerequisites
### Notion
We'll source Notion pages to make the content searchable in Pinecone. Follow the [Notion source docs](https://docs.airbyte.com/integrations/sources/notion) for information on configuring a Notion source.
### BigQuery
BigQuery will store the raw API data from our sources and also the transformed data from dbt. You'll need a BigQuery project and a dataset with a service account that can control the dataset. Airbyte's [BigQuery destination docs](https://docs.airbyte.com/integrations/destinations/bigquery) lists the requirements and links describing how to configure.
### Pinecone
Pinecone is the vector database we will use to index documents and their metadata, and also for finding documents that provide context for a query. You'll need a Pinecone account, an API key, and an index created with 1536 dimensions, as OpenAI returns vectors of 1536 dimensions. See the [Pinecone docs](https://docs.pinecone.io/docs/quickstart) for more information.
### OpenAI
OpenAI is used both in processing the query and also provides the LLM for generating a response. The query is vectorized so it can be used to identify relevant items in the Pinecone index, and these items are provided to the LLM as context to better respond to the query. You'll need an OpenAI account with credits and an API key. If you already have an account with OpenAI, you can generate a new API key by visiting this link: [platform.openai.com/api-keys](https://platform.openai.com/api-keys)
### Software
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: If you want to deploy the open-source version instead of using Airbyte Cloud, follow the installation instructions from the [Deploy Airbyte documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Creating an Environment For Your Project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add data_to_pinecone_llm
```
2. **Navigate to the directory**:
```bash
cd data_to_pinecone_llm
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv .venv
source .venv/bin/activate
```
- For Windows:
```bash
python -m venv .venv
.\.venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Adding Configuration Values
The following steps will execute Terraform and dbt workflows to create the necessary resources for the integration. To do this, you'll need to provide some configuration values. Copy the provided `.env.template` file to `.env` and set its values. Then run the following command to source the environment variables into your shell so they are available when running Terraform and dbt:
```bash
set -o allexport && source .env && set +o allexport
```
Don't forget to re-run the above command after making any changes to the `.env` file.
## 3. Setting Up Airbyte Connectors
### Manually via the Airbyte UI
Create the [sources](https://docs.airbyte.com/quickstart/add-a-source) and [destinations](https://docs.airbyte.com/quickstart/add-a-destination) within your Airbyte environment. Then you can follow the [create connections](https://docs.airbyte.com/quickstart/create-a-connection) guide to define connections between them and control how and when the data will sync.
You can find the Airbyte workspace ID from the URL, e.g. `https://cloud.airbyte.com/workspaces/{workspace-id}/connections`.
### With Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set it up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Defines variables whose values are populated from the `.env` file.
If you're using Airbyte Cloud instead of a local deployment you will need to update the Airbyte provider configuration in _infra/airbyte/provider.tf_, setting the `bearer_auth` to an API key generated at https://portal.airbyte.com/.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
If you're using Airbyte Cloud, we configured Terraform to assemble the URL for you:
```bash
terraform output
```
will print something like:
```
airbyte_cloud_url = "https://cloud.airbyte.com/workspaces/{workspace-id}/connections"
```



## 4. Sync Notion Data into BigQuery
Before building the dbt project, which transforms the raw Notion data, the source tables must exist in the BigQuery dataset. Open the Airbyte UI and navigate to the Connections page. Click the _Sync Now_ button for `Notion to BigQuery` to start the sync.

Once the sync is complete you can inspect the tables in BigQuery.

## 5. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform, and is preconfigured to pull the connection details from environment variables.
2. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided as the `keyfile` config in `profiles.yml`.
3. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
4. **Run the Model**
With the connection in place, you can now build the model to create the `notion` view in BigQuery, which the configured Airbyte connection will use to sync Notion pages into Pinecone.
You can inspect the provided _dbt_project/models/notion.sql_ to see how it collates Notion blocks into a single text field which can be vectorized and indexed into Pinecone.
```bash
dbt build
```
You should now see the `notion_data` view in BigQuery.

## 6. Publishing Into Pinecone
With the source data transformed it is now ready to publish into the Pinecone index. Head back to the Connections and start a sync for `Publish BigQuery Data to Pinecone`.

## 7. Asking Questions About Your Data
After Airbyte has published the Notion page text into your Pinecone index, it is ready to be interacted with via an LLM model. After you set a few more environment variables, the provided _query.py_ starts an interactive session where you can ask questions about your data.
```bash
export OPENAI_API_KEY=openai_api_key
export PINECONE_API_KEY=pinecone_api_key
export PINECONE_ENVIRONMENT=pinecone_environment
export PINECONE_INDEX=pinecone_index
python query.py
```
## 8. Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Use Additional Data Sources**:
Pinecone allows you to store vectors from multiple sources in a single index. This allows multiple document types to be queried together, and even referenced between themselves by the LLM. Add additional sources to the Terraform configuration or manually through the Airbyte UI.
2. **Try Different LLM Chains**:
The provided _query.py_ uses many default values when creating the LLM chain, which is quick to create but limits flexibility. One constraint is that the `stuff` chain type requires the query and discovered documents all fit within a single tokenized input to the LLM. This is fine for short queries and documents, but if you use longer sources you will need to use another chain type such as `map_reduce`.
3. **Extend Access to the LLM**:
The provided _query.py_ is a simple example of how to interact with the LLM locally. You could build a web-based UI that triggers a query and displays the results, or even integrate the LLM into a Slack bot to provide answers to questions in real-time.
4. **Make Use of Metadata**
When configuring the Pinecone destination you can choose to include metadata fields from the data. This allows you to filter results based on the metadata fields. For example, you could filter the results of a query to only include documents from a specific author, or provide a time range that a matching item must have been created or edited within. Metadata values are also provided back to the query executor so you can use them to provide additional context to the LLM.
See [Filtering with metadata](https://docs.pinecone.io/docs/metadata-filtering) for more details.
================================================
FILE: data_to_pinecone_llm/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: data_to_pinecone_llm/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: data_to_pinecone_llm/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: data_to_pinecone_llm/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: "dbt_project"
version: "1.0.0"
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: "dbt_project"
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
================================================
FILE: data_to_pinecone_llm/dbt_project/macros/.gitkeep
================================================
================================================
FILE: data_to_pinecone_llm/dbt_project/models/notion.source.yml
================================================
version: 2

sources:
  # Raw Notion tables landed by the Airbyte "Notion to BigQuery" connection
  # (see infra/airbyte/main.tf: prefix "notion_" + custom dataset namespace).
  - name: notion_raw
    database: "{{ env_var('BIGQUERY_PROJECT_ID') }}"   # BigQuery project
    schema: "{{ env_var('BIGQUERY_DATASET_ID') }}"     # BigQuery dataset
    tables:
      - name: notion_blocks
      - name: notion_iterator
      - name: notion_pages
================================================
FILE: data_to_pinecone_llm/dbt_project/models/notion_data.sql
================================================
/* Notion data transformation and cleansing.

   Notion data will be conformed to plain text:
   1. Parent page nodes are recursively parsed downwards to locate
      all child nodes.
   2. All applicable node types are converted to plain text.

   TODO:
   - Consider adding markdownified document metadata cues in the
     future for headers, list items, tables, etc.
*/

-- Walk the Notion block tree. The anchor set is every block, carrying the
-- owning page id when its parent is a page (JSON_VALUE returns NULL
-- otherwise); each recursion step joins child blocks whose parent.block_id
-- points at a block already in the set, propagating parent_page_id down.
with recursive iterator as (
    select JSON_VALUE(parent.page_id) as parent_page_id, *
    from {{ source('notion_raw', 'notion_blocks') }} AS blocks
    union all
    select iterator.parent_page_id, next_block.*
    from iterator
    join {{ source('notion_raw', 'notion_blocks') }} AS next_block
        on JSON_VALUE(next_block.parent.block_id) = iterator.id
),

-- One *_text column per supported block type: flatten the type's rich_text
-- JSON array (or table-row cells) and space-join the plain_text fragments.
-- Types that don't apply to a given row yield an empty string.
extracted_text as (
    select
        ARRAY_TO_STRING(
            ARRAY(
                SELECT JSON_VALUE(json_values, '$.plain_text')
                FROM UNNEST(JSON_QUERY_ARRAY(paragraph, '$.rich_text')) as json_values
            ),
            ' '
        ) as paragraph_text,
        -- Table rows nest one level deeper: first element of each cell.
        ARRAY_TO_STRING(
            ARRAY(
                SELECT JSON_VALUE(json_values, '$[0].plain_text')
                FROM
                    UNNEST(JSON_QUERY_ARRAY(table_row, '$.cells'))
                    as json_values
            ),
            ' '
        ) as table_row_text,
        ARRAY_TO_STRING(
            ARRAY(
                SELECT JSON_VALUE(json_values, '$.plain_text')
                FROM
                    UNNEST(JSON_QUERY_ARRAY(heading_1, '$.rich_text'))
                    as json_values
            ),
            ' '
        ) as heading_1_text,
        ARRAY_TO_STRING(
            ARRAY(
                SELECT JSON_VALUE(json_values, '$.plain_text')
                FROM
                    UNNEST(JSON_QUERY_ARRAY(heading_2, '$.rich_text'))
                    as json_values
            ),
            ' '
        ) as heading_2_text,
        ARRAY_TO_STRING(
            ARRAY(
                SELECT JSON_VALUE(json_values, '$.plain_text')
                FROM
                    UNNEST(JSON_QUERY_ARRAY(heading_3, '$.rich_text'))
                    as json_values
            ),
            ' '
        ) as heading_3_text,
        ARRAY_TO_STRING(
            ARRAY(
                SELECT JSON_VALUE(json_values, '$.plain_text')
                FROM UNNEST(JSON_QUERY_ARRAY(numbered_list_item, '$.rich_text')) as json_values
            ),
            ' '
        ) as numbered_list_item_text,
        ARRAY_TO_STRING(
            ARRAY(
                SELECT JSON_VALUE(json_values, '$.plain_text')
                FROM UNNEST(JSON_QUERY_ARRAY(bulleted_list_item, '$.rich_text')) as json_values
            ),
            ' '
        ) as bulleted_list_item_text,
        *
    from iterator
),

-- Collapse the per-type columns into a single `text` value per block:
-- coalesce picks the first non-empty extraction.
combined_text as (
    select
        parent_page_id,
        id,
        type,
        coalesce(
            IF(CHAR_LENGTH(paragraph_text) > 0, paragraph_text, null),
            IF(CHAR_LENGTH(table_row_text) > 0, table_row_text, null),
            IF(CHAR_LENGTH(heading_1_text) > 0, heading_1_text, null),
            IF(CHAR_LENGTH(heading_2_text) > 0, heading_2_text, null),
            IF(CHAR_LENGTH(heading_3_text) > 0, heading_3_text, null),
            IF(CHAR_LENGTH(numbered_list_item_text) > 0, numbered_list_item_text, null),
            IF(CHAR_LENGTH(bulleted_list_item_text) > 0, bulleted_list_item_text, null)
        ) as text
    from extracted_text
),

aggregated_text as (
    /* Aggregated text string per Notion page */
    select
        parent_page_id, string_agg(text) as text
    from combined_text
    group by parent_page_id
)

-- Attach page metadata: url and last_edited_time are used by the Airbyte
-- Pinecone connection as metadata fields / cursor (see infra/airbyte/main.tf).
select
    last_edited_time, url, text as notion_text
from aggregated_text
join {{ source('notion_raw', 'notion_pages') }} on id = parent_page_id
where text is not null /* Pinecone connector will fail if we attempt to
                          insert with a null text value. */
================================================
FILE: data_to_pinecone_llm/dbt_project/profiles.yml
================================================
# dbt connection profile for the `dbt_project` project (see dbt_project.yml).
# All connection details come from environment variables so no credentials
# are committed to version control.
dbt_project:
  outputs:
    dev:
      type: bigquery
      project: "{{ env_var('BIGQUERY_PROJECT_ID') }}"
      dataset: "{{ env_var('BIGQUERY_DATASET_ID') }}"
      location: "{{ env_var('BIGQUERY_DATASET_LOCATION') }}"
      # Path to a service-account JSON key file (not its contents).
      keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH') }}"
      method: service-account
      priority: interactive
      job_execution_timeout_seconds: 300
      job_retries: 1
      threads: 1
  target: dev
================================================
FILE: data_to_pinecone_llm/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: data_to_pinecone_llm/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: data_to_pinecone_llm/dbt_project/tests/.gitkeep
================================================
================================================
FILE: data_to_pinecone_llm/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: data_to_pinecone_llm/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.4"
constraints = "0.3.4"
hashes = [
"h1:0AHJKsRTlX6BCJZCJw5/oHsN97zi1AP33JeuPMwoX6U=",
"h1:E97NK92naRr/9iAtDxA1PJ1aQYWR/vqWN10ThuQjUn8=",
"zh:02167e00f7e89b6f09ae8796b9ee0ac2d8702b5cb295cb27d7a79266ffafe196",
"zh:1ddad39354af090e830caf1e5cce845f24ff0bcef61b73e77ebc7703c2ecf90d",
"zh:223a0a46d354ad0709d5f28d60accb3448ba5f256b84438238fb05235d1e5b34",
"zh:29efd8848b9560456ec3d90f54984670e9d5b7e36f1edd2adb15c5fec3f57166",
"zh:33d31310ba7ec699b5bd64edbb63b0a89bd55d87fae0f55409bbfa5fd7dd4d90",
"zh:35ed0e2894e28ec7762406a18510b789b76b0649ace309eec22acaf10c982f08",
"zh:4ba860918b65c00cc596d0b5b40068b89a72a300604a62bca7d286073779e684",
"zh:59a0d1128477e587d9dac71f93598bae6050d176d29c840b6ad1bf95529d61e8",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8d9bb37e9094eba02acf8d08cf9f3331cd7c26478441d70e74e8d1ec9cb33aaa",
"zh:9e5243eac43950889781a88d4e6186aea240898045e0e3c8fffd3291c5e74b6f",
"zh:a0c31a5bc0cbc4a7341a0d185806a1c6797508580bede71a5009ad7b078d68c2",
"zh:af341259999c6639a1c27e8f116a40b088dd192a3057096dc23a42affc97113f",
"zh:b9779f8f695b4fab56e062abab61eaa58853f20c6411d53b2bd82a66d79a8b49",
"zh:e284d898e5a30e507f1292635542dafe0e95ea8a5a215103a9d96d699aed9e75",
]
}
================================================
FILE: data_to_pinecone_llm/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
locals {
  // The BigQuery destination expects a JSON credentials file, but the source expects a JSON string.
  // Read JSON file into a string variable, so we can use the same contents also for the source connector.
  // Newlines are stripped so the key material becomes a single-line JSON string.
  bigquery_credentials_json = replace(
    file(var.bigquery_credentials_json_file_path), "\n", ""
  ) // <- If this fails, please double check that the file exists.
  // Convenience link to the workspace's Connections page; surfaced via output.tf.
  airbyte_cloud_url = var.airbyte_workspace_id != null ? "https://cloud.airbyte.com/workspaces/${var.airbyte_workspace_id}/connections" : "(n/a)"
}
// Sources

// Reads the dbt-built `notion_data` view back out of BigQuery so it can be
// published to Pinecone.
resource "airbyte_source_bigquery" "bigquery" {
  configuration = {
    credentials_json = local.bigquery_credentials_json // Same key material as the destination (see locals).
    project_id       = var.bigquery_project_id
    dataset_id       = var.bigquery_dataset_id
    source_type      = "bigquery"
  }
  name         = "BigQuery Publishing Source"
  workspace_id = var.airbyte_workspace_id
}
// Pulls raw pages/blocks/users from Notion using an integration access token.
resource "airbyte_source_notion" "notion_source" {
  configuration = {
    credentials = {
      source_notion_authenticate_using_access_token = {
        auth_type = "token"
        token     = var.notion_token
      }
    }
    source_type = "notion"
    start_date  = "2023-01-01T00:00:00.001Z" # Note: Fractional seconds with .000 may not work.
  }
  name         = "Notion Data Source"
  workspace_id = var.airbyte_workspace_id
}
// Destinations

// Raw-data landing zone: the Notion source syncs into this dataset, and dbt
// reads these tables to build the `notion_data` view.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.bigquery_dataset_id
    dataset_location = var.bigquery_dataset_location
    destination_type = "bigquery"
    project_id       = var.bigquery_project_id
    credentials_json = local.bigquery_credentials_json // Shared with the source (see locals).
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard" // Plain INSERTs; no GCS staging bucket required.
      }
    }
  }
  name         = "BigQuery Raw Data Destination"
  workspace_id = var.airbyte_workspace_id
}
// Pinecone vector destination: embeds incoming text with OpenAI and upserts
// the resulting vectors into the configured index.
resource "airbyte_destination_pinecone" "pinecone" {
  configuration = {
    destination_type = "pinecone"
    embedding = {
      destination_pinecone_embedding_open_ai = {
        openai_key = var.openai_key
        mode       = "openai"
      }
    }
    indexing = {
      index                = var.pinecone_index
      pinecone_environment = var.pinecone_environment
      pinecone_key         = var.pinecone_key
    }
    processing = {
      // Document-splitting settings applied before embedding; see the Airbyte
      // Pinecone destination docs for the exact chunking semantics.
      chunk_overlap = 16
      chunk_size    = 1024
      // Columns of the `notion_data` view: url/last_edited_time ride along as
      // filterable metadata, notion_text is what gets embedded.
      metadata_fields = ["url", "last_edited_time"]
      text_fields     = ["notion_text"]
    }
  }
  name         = "Pinecone Publish Destination"
  workspace_id = var.airbyte_workspace_id
}
// Connections

// Notion -> BigQuery raw sync. The custom namespace writes into the same
// dataset dbt reads from, and the `notion_` prefix yields tables such as
// notion_blocks / notion_pages, matching the dbt source definitions.
resource "airbyte_connection" "notion_connection" {
  name                 = "Notion to BigQuery"
  source_id            = airbyte_source_notion.notion_source.source_id
  destination_id       = airbyte_destination_bigquery.bigquery.destination_id
  namespace_definition = "custom_format"
  namespace_format     = var.bigquery_dataset_id
  prefix               = "notion_"
  configurations = {
    streams = [
      { name = "blocks" },
      { name = "pages" },
      { name = "users" }
    ]
  }
}
// Publishes the dbt-built `notion_data` view to Pinecone, incrementally by
// last_edited_time and deduplicated on url.
resource "airbyte_connection" "bigquery_to_pinecone" {
  name           = "Publish BigQuery Data to Pinecone"
  source_id      = airbyte_source_bigquery.bigquery.source_id
  destination_id = airbyte_destination_pinecone.pinecone.destination_id
  configurations = {
    streams = [
      {
        name         = "notion_data",
        cursor_field = ["last_edited_time"],
        primary_key  = [["url"]]
        sync_mode    = "incremental_deduped_history"
      }
    ]
  }
}
================================================
FILE: data_to_pinecone_llm/infra/airbyte/output.tf
================================================
// These variables will be printed at the end of a successful `terraform apply`,
// or by running `terraform output` at any time.

// URL of the Airbyte Cloud Connections page for the configured workspace,
// or "(n/a)" when no workspace id was supplied (computed in main.tf locals).
output "airbyte_cloud_url" {
  value = local.airbyte_cloud_url
}
================================================
FILE: data_to_pinecone_llm/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4" // Pinned; must stay in sync with .terraform.lock.hcl.
    }
  }
}

/////////////////////////////////
// Airbyte Provider Definition //
/////////////////////////////////
// Uncomment the OSS or Cloud block, depending on your desired deployment location:

// Airbyte Cloud:
provider "airbyte" {
  bearer_auth = var.airbyte_cloud_auth_key
}

# // Airbyte OSS:
# provider "airbyte" {
#   // Optionally override the airbyte-api-server URL
#   server_url = "http://localhost:8006/v1"
#   // Optionally override the default password/username below
#   username = "airbyte"
#   password = "password"
# }
================================================
FILE: data_to_pinecone_llm/infra/airbyte/variables.tf
================================================
// Airbyte
variable "airbyte_workspace_id" {
description = < What do you want to know?"))
# Interactive Q&A loop: read a question from stdin, run it through the
# retrieval chain, and render the answer as Markdown. Runs until Ctrl-C.
# NOTE(review): `qa` and `console` are defined earlier in query.py, outside
# this excerpt — presumably a LangChain RetrievalQA chain and a rich Console.
console.print("")
while True:
    try:
        query = input("")
    except KeyboardInterrupt:
        # Ctrl-C exits cleanly instead of dumping a traceback.
        console.print("\n")
        console.print(Markdown("_Goodbye!_ 👋"))
        exit(0)
    answer = qa.run(query)
    console.print(Markdown(answer))
    console.print(Markdown("\n------\n> What else do you want to know?\n"))
    console.print("\n")
================================================
FILE: data_to_pinecone_llm/quickstart.md
================================================
# Quick Start
```bash
# Change to the quickstart directory
cd data_to_pinecone_llm
# Create a new `.env` file from the template
cp .env.template .env
# Edit the `.env` file and provide all needed variables
code .env
# Export the .env file variables
set -o allexport && source .env && set +o allexport
# Deploy terraform
terraform -chdir=infra/airbyte init
terraform -chdir=infra/airbyte apply
# Print Airbyte Cloud workspace URL
terraform -chdir=infra/airbyte output
# In Airbyte Cloud:
# 1. Perform any needed fine-tuning or debugging.
# 2. Run the source data connection at least once.
# Build dbt models
dbt run --project-dir=dbt_project --profiles-dir=dbt_project
# In Airbyte Cloud:
# 1. Create the publish connection if needed.
# 2. Run the publish connection at least once.
# Create and activate the virtual environment for the AI chatbot
python -m venv .venv
source .venv/bin/activate
# Run the chatbot
./query.py
```
Please see the [Readme](README.md) for more detailed instructions.
================================================
FILE: data_to_pinecone_llm/secrets/.gitignore
================================================
# Git should ignore everything in this directory except this gitignore file and the readme.
*
!README.md
!.gitignore
================================================
FILE: data_to_pinecone_llm/secrets/README.md
================================================
# Secrets directory
You can store secrets here and they will be ignored by git.
For instance, you can save your Google Credentials JSON file here in this directory.
================================================
FILE: data_to_pinecone_llm/setup.py
================================================
"""Package configuration for the data_to_pinecone_llm quickstart."""
from setuptools import find_packages, setup

# Runtime dependencies for the dbt models and the query.py chatbot.
REQUIREMENTS = [
    "dbt-bigquery",     # Build the notion_data view in BigQuery.
    "pinecone-client",  # Vector index client used by query.py.
    "langchain",        # LLM orchestration for the Q&A chain.
    "openai==0.28.1",   # Pinned to the pre-1.0 OpenAI client API.
    "tiktoken",         # Tokenizer used for OpenAI embeddings.
    "rich",             # Console / Markdown rendering in query.py.
]

setup(
    name="data_to_pinecone_llm",
    packages=find_packages(),
    install_requires=REQUIREMENTS,
    extras_require={"dev": ["pytest"]},  # `pip install -e ".[dev]"` for tests.
)
================================================
FILE: database_snapshot/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: database_snapshot/README.md
================================================
# Database Snapshot
Welcome to the "Database Snapshot" repository! This repo provides a quickstart template for building a full data stack that creates a table snapshot from a database and stores it in an Amazon S3 bucket as a JSONL file using Airbyte and then loads the snapshot file to a preferred data warehouse, also using Airbyte.
In this quickstart, we will easily snapshot a sample table from Postgres and then load the table snapshot into BigQuery. The snapshot creation from the database and snapshot loading into the data warehouse are scheduled as one time operations. While this template doesn't delve into specific data, its goal is to showcase the synergy of these tools.
Like other quickstarts, this is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Database Snapshot](#database-snapshot)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [3. Running the Stack](#3-running-the-stack)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add database_snapshot
```
2. **Navigate to the directory**:
```bash
cd database_snapshot
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
You'll also find three crucial Terraform modules:
- `connections`: Contains the configuration files for the Airbyte connections.
- `destinations`: Contains the configuration files for the Airbyte destination connector(s).
- `sources`: Contains the configuration files for the Airbyte source connector(s).
In each terraform module, you will find the following Terraform configuration files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
- `outputs.tf`: Defines exported data or metadata about your resources.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres, S3 (source and destination) and BigQuery connections. You can utilize the `variables.tf` files to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources. A few things to note:
1. The S3 source connector will not be created if a bucket with the same name specified is not present in S3.
2. JSONL files have to be present in the specified bucket path. Otherwise, the snapshot loading connection (S3 to BigQuery) will not be created because the absence of the JSONL files will prevent Airbyte from detecting a stream.
Fixing the first issue (if you encounter it) is straightforward - just create a new bucket with a unique name. However, since you will be snapshotting the database table for the first time, you will definitely have to fix the second issue before you proceed. There are 2 options for this:
1. **(Recommended)** Run the snapshot operation first. This populates the S3 bucket with actual JSONL files for the loading connection to pick up for stream detection. Then, create the loading connection and run it.
2. Upload an empty JSONL file to your S3 bucket with the bucket path you have specified in the destination connector config.
Now proceed to create the Airbyte resources.
```bash
terraform apply
```
You will get an error that the `S3 to BigQuery` connection cannot be created. Ignore this and proceed. You should now have 5 resources (4 connectors and 1 connection) created in your Airbyte environment:
1. The Postgres Database Source
2. The S3 Destination
3. The S3 Source
4. The BigQuery Destination
5. The Postgres to S3 Connection
Confirm this by running the command below
```bash
terraform state list
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 3. Running the Stack
After verifying that the 4 connectors and 1 connection have been created, you can proceed to run the snapshot operation.
1. **Run the Database Snapshot**:
Navigate to the `Postgres to S3` connection and click `Sync now` to snapshot the database table and save it to your S3.
2. **Create the Loading Connection**:
Once the snapshot has successfully completed, navigate back to your terraform home directory and create the second connection.
```bash
terraform apply
```
Now you should not see an error since the JSONL files are now present in the S3 bucket. Verify that the connection was created in the Airbyte UI and proceed to `Sync now`.
You should now have your table snapshot loaded to your data warehouse.
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Add more Tables from Database**:
You can add more tables from your database to snapshot.
2. **Create dbt transformations**:
You can create transformations using dbt, depending on your use case for the loaded data in your data warehouse. Add SQL transformations in dbt.
3. **Scheduling**:
You can run both the snapshot and the snapshot loading syncs on schedule depending on your use case. This can be done within Airbyte. For extensibility, you can use workflow orchestration tools like Dagster, Airflow or Prefect.
4. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or enhance your transformation logic – the floor is yours. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: database_snapshot/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: database_snapshot/infra/airbyte/connections/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs

// Connections
// NOTE: the "..." values below are placeholders — replace them with your
// actual table/stream names and key columns before `terraform apply`.

// Snapshots the Postgres table into S3 as JSONL files.
resource "airbyte_connection" "postgres_to_s3" {
  name           = "Postgres to S3"
  source_id      = var.postgres_id
  destination_id = var.s3_dest_id
  schedule = {
    schedule_type = "manual" // Snapshots are triggered by hand ("Sync now").
  }
  configurations = {
    streams = [
      {
        cursor_field = ["...",]
        name         = "...my_table_name_1..."
        primary_key  = [["...",],]
        sync_mode    = "full_refresh_append"
      }
    ]
  }
}

// Loads the JSONL snapshot from S3 into BigQuery. Creation fails until the
// bucket actually contains files for stream detection (see the README).
resource "airbyte_connection" "s3_to_bigquery" {
  name           = "S3 to BigQuery"
  source_id      = var.s3_source_id
  destination_id = var.bigquery_id
  schedule = {
    schedule_type = "manual"
  }
  configurations = {
    streams = [
      {
        cursor_field = ["...",]
        name         = "...my_table_name_1..."
        primary_key  = [["...",],]
        sync_mode    = "full_refresh_append"
      }
    ]
  }
}
================================================
FILE: database_snapshot/infra/airbyte/connections/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte provider version so `terraform init` is reproducible.
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = var.airbyte_password
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: database_snapshot/infra/airbyte/connections/variables.tf
================================================
// Inputs for the connections module. IDs are produced by the sources and
// destinations modules (see the root main.tf wiring).

variable "s3_source_id" {
  type        = string
  description = "ID of the Airbyte S3 source connector."
}

variable "postgres_id" {
  type        = string
  description = "ID of the Airbyte Postgres source connector."
}

variable "s3_dest_id" {
  type        = string
  description = "ID of the Airbyte S3 destination connector."
}

variable "bigquery_id" {
  type        = string
  description = "ID of the Airbyte BigQuery destination connector."
}

variable "airbyte_password" {
  type        = string
  default     = "password"
  // Keep the password out of `terraform plan`/`apply` output.
  sensitive   = true
  description = "Password for the local Airbyte instance (airbyte-proxy default is \"password\")."
}
================================================
FILE: database_snapshot/infra/airbyte/destinations/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Destinations
// Destination: BigQuery dataset the snapshot is loaded into. All "..." values
// are placeholders — fill in real project/dataset/credentials before applying.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = "...my_dataset_id..."
    dataset_location = "...my_dataset_location..."
    destination_type = "bigquery"
    project_id       = "...my_project_id..."
    // Service-account key JSON, passed as a string.
    // NOTE(review): consider supplying this via a sensitive variable rather
    // than hard-coding it in version-controlled config.
    credentials_json = "...my_credentials_json_string..."
    loading_method = {
      // Standard inserts: simplest loading method, no GCS staging bucket needed.
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}

// Destination: S3 bucket where the Postgres snapshot lands as JSONL files.
// The bucket path here must match the glob configured on the S3 *source*.
resource "airbyte_destination_s3" "s3" {
  configuration = {
    destination_type = "s3"
    format = {
      destination_s3_output_format_json_lines_newline_delimited_json = {
        format_type = "JSONL"
      }
    }
    s3_bucket_name    = "...my_bucket..."
    s3_bucket_path    = "postgres_snapshot/table"
    s3_bucket_region  = "...my_bucket_region..."
    access_key_id     = "...my_aws_access_key_id..."
    secret_access_key = "...my_aws_secret_access_key..."
  }
  name         = "S3"
  workspace_id = var.workspace_id
}
================================================
FILE: database_snapshot/infra/airbyte/destinations/outputs.tf
================================================
// IDs of the created destinations, consumed by the connections module.

output "bigquery_id" {
  description = "Airbyte destination ID of the BigQuery destination."
  value       = airbyte_destination_bigquery.bigquery.destination_id
}

output "s3_id" {
  description = "Airbyte destination ID of the S3 destination."
  value       = airbyte_destination_s3.s3.destination_id
}
================================================
FILE: database_snapshot/infra/airbyte/destinations/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte provider version so `terraform init` is reproducible.
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = var.airbyte_password
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: database_snapshot/infra/airbyte/destinations/variables.tf
================================================
variable "workspace_id" {
  type        = string
  description = "Airbyte workspace ID the destinations are created in."
}

variable "airbyte_password" {
  type        = string
  default     = "password"
  // Keep the password out of `terraform plan`/`apply` output.
  sensitive   = true
  description = "Password for the local Airbyte instance (airbyte-proxy default is \"password\")."
}
================================================
FILE: database_snapshot/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Root wiring: create sources and destinations, then connect them.
// Fix: use the variables declared in variables.tf (workspace_id,
// airbyte_password) instead of hard-coded "...my_workspace_id..."
// placeholders duplicated per module — set them once in a tfvars file.
module "sources" {
  source           = "./sources"
  workspace_id     = var.workspace_id
  airbyte_password = var.airbyte_password
}

module "destination" {
  source           = "./destinations"
  workspace_id     = var.workspace_id
  airbyte_password = var.airbyte_password
}

module "connections" {
  source           = "./connections"
  s3_dest_id       = module.destination.s3_id
  s3_source_id     = module.sources.s3_id
  postgres_id      = module.sources.postgres_id
  bigquery_id      = module.destination.bigquery_id
  airbyte_password = var.airbyte_password
}
================================================
FILE: database_snapshot/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte provider version so `terraform init` is reproducible.
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = var.airbyte_password
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: database_snapshot/infra/airbyte/sources/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Source: the Postgres database being snapshotted. All "..." values are
// placeholders — replace with real connection details before applying.
resource "airbyte_source_postgres" "postgres" {
  configuration = {
    database    = "...my_database..."
    host        = "...my_host..."
    username    = "...my_username..."
    password    = "...my_password..."
    port        = 5432
    source_type = "postgres"
    schemas = [
      "...my_schema..."
    ]
    // Prefer SSL but allow unencrypted connections.
    ssl_mode = {
      allow = {}
    }
    // Direct connection — no SSH tunnel.
    tunnel_method = {
      no_tunnel = {}
    }
    // Incremental reads track a user-chosen cursor column (not CDC/WAL).
    replication_method = {
      scan_changes_with_user_defined_cursor = {}
    }
  }
  name         = "Postgres"
  workspace_id = var.workspace_id
}

// Source: S3 bucket holding the JSONL snapshot written by the Postgres → S3
// connection. The glob must match the path the S3 destination writes to.
resource "airbyte_source_s3" "s3" {
  configuration = {
    bucket      = "...my_bucket..."
    source_type = "s3"
    streams = [
      {
        file_type = "...my_file_type..."
        name      = "...my_stream_name..."
        format = {
          source_s3_file_based_stream_config_format_jsonl_format = {
            filetype = "jsonl"
          }
        }
        globs = [
          "postgres_snapshot/table/...my_stream_name.../*.jsonl*",
        ]
        primary_key = "...my_primary_key..."
      },
    ]
    aws_access_key_id     = "...my_aws_access_key_id..."
    aws_secret_access_key = "...my_aws_secret_access_key..."
  }
  name         = "S3"
  workspace_id = var.workspace_id
}
================================================
FILE: database_snapshot/infra/airbyte/sources/outputs.tf
================================================
// IDs of the created sources, consumed by the connections module.

output "postgres_id" {
  description = "Airbyte source ID of the Postgres source."
  value       = airbyte_source_postgres.postgres.source_id
}

output "s3_id" {
  description = "Airbyte source ID of the S3 source."
  value       = airbyte_source_s3.s3.source_id
}
================================================
FILE: database_snapshot/infra/airbyte/sources/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte provider version so `terraform init` is reproducible.
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = var.airbyte_password
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: database_snapshot/infra/airbyte/sources/variables.tf
================================================
variable "workspace_id" {
  type        = string
  description = "Airbyte workspace ID the sources are created in."
}

variable "airbyte_password" {
  type        = string
  default     = "password"
  // Keep the password out of `terraform plan`/`apply` output.
  sensitive   = true
  description = "Password for the local Airbyte instance (airbyte-proxy default is \"password\")."
}
================================================
FILE: database_snapshot/infra/airbyte/variables.tf
================================================
variable "workspace_id" {
  type        = string
  description = "Airbyte workspace ID all connectors are created in."
}

variable "airbyte_password" {
  type        = string
  default     = "password"
  // Keep the password out of `terraform plan`/`apply` output.
  sensitive   = true
  description = "Password for the local Airbyte instance (airbyte-proxy default is \"password\")."
}
================================================
FILE: database_snapshot/setup.py
================================================
"""Packaging configuration for the database_snapshot quickstart."""
from setuptools import find_packages, setup

setup(
    name="database-snapshot",
    packages=find_packages(),
    # No runtime requirements — Airbyte and Terraform handle the data movement.
    install_requires=[],
    # `pip install -e ".[dev]"` pulls in pytest for running the test suite.
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: developer_productivity_analytics_github/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: developer_productivity_analytics_github/README.md
================================================
# Developer Productivity Analytics Stack With Github, Airbyte, Dbt, Dagster and BigQuery
Welcome to the "Developer Productivity Analytics Stack" repository! ✨ This is your go-to place to easily set up a data stack using Airbyte, GitHub, dbt, BigQuery, and Dagster. With this setup, you can pull GitHub data, extract it using Airbyte, put it into BigQuery, and play around with it using dbt and Dagster.
This Quickstart is all about making things easy, getting you started quickly and showing you how smoothly all these tools can work together!
Below is a visual representation of how data flows through our integrated tools in this Quickstart. This comes from Dagster's global asset lineage view:
.svg>)
## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors with Terraform](#3-setting-up-airbyte-connectors-with-terraform)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#5-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add developer_productivity_analytics_github
```
2. **Navigate to the directory**:
```bash
cd developer_productivity_analytics_github
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your BigQuery connection. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset.
## 5. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
5. **Materialize Dagster Assets**:
In the Dagster UI, click on "Materialize all". This should trigger the full pipeline. First the Airbyte sync to extract data from GitHub and load it into BigQuery, and then dbt to transform the raw data, materializing the `staging` and `marts` models.
## Next Steps
Congratulations on deploying and running the Developer Productivity Analytics Quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Dive into the datasets in BigQuery, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Automate and Monitor Your Pipelines**
- Explore more advanced Dagster configurations and setups to automate your pipelines further and set up monitoring and alerting to be informed of any issues immediately.
================================================
FILE: developer_productivity_analytics_github/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: developer_productivity_analytics_github/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2

# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'

# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]

clean-targets: # directories to be removed by `dbt clean`
  - "target"
  - "dbt_packages"

# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# Both the staging and marts layers are materialized as views; override
# per-model with the `{{ config(...) }}` macro where a table is needed.
models:
  dbt_project:
    staging:
      +materialized: view
    marts:
      +materialized: view
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/avarage_time_to_merge_pr_analysis.sql
================================================
-- Average time, in seconds, between a pull request being opened (created_at)
-- and merged (merged_at). Unmerged PRs are excluded.
-- NOTE(review): references transformed_data.stg_pull_requests directly;
-- consider {{ ref('stg_pull_requests') }} so dbt tracks lineage — confirm the
-- staging model name before switching.
SELECT
    AVG(TIMESTAMP_DIFF(merged_at, created_at, SECOND)) AS avg_merge_time_seconds
FROM
    transformed_data.stg_pull_requests
WHERE
    merged_at IS NOT NULL
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/commits_over_time_per_dev_analysis.sql
================================================
-- Daily commit counts for one developer, ordered chronologically.
-- NOTE(review): 'developer_username' is a literal placeholder — as written the
-- filter only matches rows whose author login is exactly that string, so the
-- model returns nothing until it is replaced with a real GitHub login (or a
-- dbt var).
SELECT
    DATE(created_at) AS commit_date,
    JSON_EXTRACT_SCALAR(author, '$.login') AS developer_username,
    COUNT(*) AS num_commits
FROM
    transformed_data.stg_commits
WHERE
    JSON_EXTRACT_SCALAR(author, '$.login') = 'developer_username'
GROUP BY
    commit_date, developer_username
ORDER BY
    commit_date
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/dev_activity_by_day_of_week_analysis.sql
================================================
-- Commit counts per developer bucketed by day of week (1 = Sunday in
-- BigQuery's DAYOFWEEK) and hour of day, to surface activity patterns.
-- NOTE(review): timestamps are bucketed in whatever timezone created_at is
-- stored in — presumably UTC; confirm before interpreting "hour_of_day".
SELECT
    EXTRACT(DAYOFWEEK FROM created_at) AS day_of_week,
    EXTRACT(HOUR FROM created_at) AS hour_of_day,
    JSON_EXTRACT_SCALAR(author, '$.login') AS developer_username,
    COUNT(*) AS num_commits
FROM
    transformed_data.stg_commits
GROUP BY
    day_of_week, hour_of_day, developer_username
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/dev_collaboration_network_analysis.sql
================================================
-- Approximate developer collaboration network: two developers "collaborate"
-- when they both have commits in the same repository. Counts commit pairs per
-- (developer1, developer2) and keeps pairs above a threshold.
-- Fix: the self-join previously also paired a developer's own commits with
-- each other, reporting developers as collaborating with themselves; pairs
-- with identical author logins are now excluded.
WITH Collaboration AS (
    SELECT
        JSON_EXTRACT_SCALAR(a.author, '$.login') AS developer1,
        JSON_EXTRACT_SCALAR(b.author, '$.login') AS developer2,
        COUNT(*) AS num_collaborations
    FROM
        transformed_data.stg_commits AS a
    JOIN
        transformed_data.stg_commits AS b
    ON
        a.repository = b.repository
        AND a.sha <> b.sha
        -- Exclude self-pairs: same author on both sides is not collaboration.
        AND JSON_EXTRACT_SCALAR(a.author, '$.login') <> JSON_EXTRACT_SCALAR(b.author, '$.login')
    GROUP BY
        developer1, developer2
)

SELECT
    developer1,
    developer2,
    num_collaborations
FROM
    Collaboration
WHERE
    num_collaborations > 5 -- Adjust threshold as needed
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/freq_of_code_contribution_analysis.sql
================================================
-- Total number of commits per developer across all repositories — a simple
-- contribution-frequency leaderboard.
SELECT
    JSON_EXTRACT_SCALAR(author, '$.login') AS developer_username,
    COUNT(*) AS num_contributions
FROM
    transformed_data.stg_commits
GROUP BY
    developer_username
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/no_of_code_reviews_per_dev_analysis.sql
================================================
-- Number of pull requests on which each developer was requested as reviewer.
-- NOTE(review): only the FIRST element of requested_reviewers.users is
-- extracted ('$.users[0].login'); PRs with multiple requested reviewers
-- credit only the first one. Consider UNNEST-ing the array if all reviewers
-- should be counted.
SELECT
    JSON_EXTRACT_SCALAR(requested_reviewers, '$.users[0].login') AS developer_username,
    COUNT(*) AS num_reviews
FROM
    transformed_data.stg_pull_requests
WHERE
    JSON_EXTRACT_SCALAR(requested_reviewers, '$.users[0].login') IS NOT NULL
GROUP BY
    developer_username
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/no_of_commits_per_dev_per_repo_analysis.sql
================================================
-- Commit counts broken down by developer AND repository, to show where each
-- developer's activity is concentrated.
SELECT
    JSON_EXTRACT_SCALAR(author, '$.login') AS developer_username,
    repository,
    COUNT(*) AS num_commits
FROM
    transformed_data.stg_commits
GROUP BY
    developer_username, repository
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/no_of_pr_per_dev_analysis.sql
================================================
-- Number of pull requests opened by each developer (PR author = user.login).
SELECT
    JSON_EXTRACT_SCALAR(user, '$.login') AS developer_username,
    COUNT(*) AS num_pull_requests_opened
FROM
    transformed_data.stg_pull_requests
GROUP BY
    developer_username
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/number_of_pr_open_or_closed.sql
================================================
-- Per GitHub user: how many of their pull requests are currently open vs.
-- closed.
-- Fix: the GitHub API reports pull request state as 'open' (never 'opened'),
-- so the previous comparison `state = 'opened'` made opened_prs always 0.
SELECT
    JSON_EXTRACT_SCALAR(user, '$.login') AS username,
    SUM(CASE WHEN state = 'open' THEN 1 ELSE 0 END) AS opened_prs,
    SUM(CASE WHEN state = 'closed' THEN 1 ELSE 0 END) AS closed_prs
FROM transformed_data.stg_pull_requests
GROUP BY username
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/top_collaborators_by_repo_analysis.sql
================================================
-- Top 5 committers per repository, ranked by commit count.
-- NOTE(review): ROW_NUMBER breaks ties arbitrarily; use RANK/DENSE_RANK if
-- equally-active developers should all appear.
WITH TopCollaborators AS (
    SELECT
        repository,
        JSON_EXTRACT_SCALAR(author, '$.login') AS developer_username,
        COUNT(*) AS num_commits
    FROM
        transformed_data.stg_commits
    GROUP BY
        repository, developer_username
)

SELECT
    repository,
    developer_username,
    num_commits
FROM (
    -- Rank developers within each repository by commit volume.
    SELECT
        repository,
        developer_username,
        num_commits,
        ROW_NUMBER() OVER (PARTITION BY repository ORDER BY num_commits DESC) AS rn
    FROM
        TopCollaborators
)
WHERE
    rn <= 5 -- Adjust the number of top collaborators as needed
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/marts/track_issues_assigned_by_dev_analysis.sql
================================================
-- Number of issues assigned to each developer (assignee.login).
-- NOTE(review): issues with no assignee produce a NULL developer_username
-- group — filter `assignee IS NOT NULL` if that row is unwanted.
SELECT
    JSON_EXTRACT_SCALAR(assignee, '$.login') AS developer_username,
    COUNT(*) AS num_issues_assigned
FROM
    transformed_data.stg_issues
GROUP BY
    developer_username
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/sources/github_source.yml
================================================
version: 2
sources:
- name: github
# Use your BigQuery project ID
database: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
# Use your BigQuery dataset name
schema: github_airbyte
tables:
- name: users
description: "Simulated user data from the Github connector."
columns:
- name: id
description: "Unique identifier for the users."
- name: avatar_url
- name: events_url
- name: followers_url
- name: following_url
- name: gists_url
- name: gravatar_url
- name: html_url
- name: login
- name: node_id
- name: organization
- name: organizations_url
- name: received_events_url
- name: repos_url
- name: site_admin
- name: starred_url
- name: subscriptions_url
- name: type
- name: url
- name: teams
description: "Simulated team data from the Github connector."
columns:
- name: id
description: "Unique identifier for the teams."
- name: description
- name: members_url
- name: name
- name: notifaction_setting
- name: parent
- name: permission
- name: html_url
- name: privacy
- name: node_id
- name: organization
- name: repositories_url
- name: slug
- name: url
- name: tags
description: "Simulated tag data from the Github connector."
columns:
- name: name
- name: commit
- name: tarball_url
- name: zipball_url
- name: repository
- name: node_id
- name: stargazers
description: "Simulated stargazer data from the Github connector."
columns:
- name: user
- name: user_id
- name: starred_at
- name: repository
- name: reviews
description: "Simulated review data from the Github connector."
columns:
- name: id
description: "Unique identifier for the reviews."
- name: user
- name: author_association
- name: body
- name: repository
- name: commit_id
- name: created_at
- name: html_url
- name: node_id
- name: pull_request_url
- name: state
- name: submitted_at
- name: updated_at
- name: _links
- name: review_comments
description: "Simulated review_comment data from the Github connector."
columns:
- name: id
description: "Unique identifier for the review_comments."
- name: user
- name: author_association
- name: body
- name: repository
- name: commit_id
- name: created_at
- name: html_url
- name: node_id
- name: pull_request_url
- name: diff_hunk
- name: in_reply_to_id
- name: updated_at
- name: _links
- name: line
- name: original_commit_id
- name: original_line
- name: original_position
- name: original_start_line
- name: path
- name: position
- name: pull_request_review_id
- name: reactions
- name: side
- name: start_line
- name: start_side
- name: subject_type
- name: url
- name: repositories
description: "Simulated repository data from the Github connector."
columns:
- name: id
description: "Unique identifier for the repositories."
- name: allow_forking
- name: archive_url
- name: archived
- name: assignees_url
- name: blobs_url
- name: branches_url
- name: clone_url
- name: collaborators_url
- name: comments_url
- name: commits_url
- name: compare_url
- name: contents_url
- name: contributors_url
- name: created_at
- name: default_branch
- name: deployments_url
- name: description
- name: disabled
- name: downloads_url
- name: events_url
- name: fork
- name: forks
- name: forks_count
- name: forks_url
- name: full_name
- name: git_commits_url
- name: git_refs_url
- name: git_tags_url
- name: git_url
- name: has_discussions
- name: has_downloads
- name: has_issues
- name: has_pages
- name: has_projects
- name: has_wiki
- name: homepage
- name: hooks_url
- name: html_url
- name: is_template
- name: issue_comment_url
- name: issue_events_url
- name: issues_url
- name: keys_url
- name: labels_url
- name: language
- name: languages_url
- name: license
- name: merges_url
- name: milestones_url
- name: mirror_url
- name: name
- name: node_id
- name: notifications_url
- name: open_issues
- name: open_issues_count
- name: organization
- name: owner
- name: permissions
- name: private
- name: pulls_url
- name: pushed_at
- name: releases_url
- name: security_and_analysis
- name: size
- name: ssh_url
- name: stargazers_count
- name: stargazers_url
- name: statuses_url
- name: subscribers_url
- name: subscription_url
- name: svn_url
- name: tags_url
- name: teams_url
- name: topics
- name: trees_url
- name: updated_at
- name: url
- name: visibility
- name: watchers
- name: watchers_count
- name: web_commit_signoff_required
- name: pull_requests
description: "Simulated pull_request data from the Github connector."
columns:
- name: id
description: "Unique identifier for the pull_requests."
- name: active_lock_reason
- name: assignee
- name: assignees
- name: author_association
- name: auto_merge
- name: base
- name: body
- name: closed_at
- name: comments_url
- name: commits_url
- name: created_at
- name: diff_url
- name: draft
- name: head
- name: html_url
- name: issue_url
- name: labels
- name: locked
- name: merge_commit_sha
- name: merged_at
- name: milestone
- name: node_id
- name: number
- name: patch_url
- name: repository
- name: requested_reviewers
- name: requested_teams
- name: review_comment_url
- name: review_comments_url
- name: state
- name: statuses_url
- name: title
- name: updated_at
- name: url
- name: user
- name: organizations
description: "Simulated organization data from the Github connector."
columns:
- name: id
description: "Unique identifier for the organizations."
- name: advanced_security_enabled_for_new_repositories
- name: archived_at
- name: billing_email
- name: blog
- name: collaborators
- name: company
- name: created_at
- name: default_repository_permission
- name: dependabot_alerts_enabled_for_new_repositories
- name: dependabot_security_updates_enabled_for_new_repositories
- name: dependency_graph_enabled_for_new_repositories
- name: description
- name: disk_usage
- name: email
- name: events_url
- name: followers
- name: following
- name: has_organization_projects
- name: has_repository_projects
- name: hooks_url
- name: html_url
- name: is_verified
- name: issues_url
- name: location
- name: login
- name: members_allowed_repository_creation_type
- name: members_can_create_internal_repositories
- name: members_can_create_pages
- name: members_can_create_private_pages
- name: members_can_create_private_repositories
- name: members_can_create_public_pages
- name: members_can_create_public_repositories
- name: members_can_create_repositories
- name: members_can_fork_private_repositories
- name: members_url
- name: name
- name: node_id
- name: owned_private_repos
- name: plan
- name: private_gists
- name: public_gists
- name: public_members_url
- name: public_repos
- name: repos_url
- name: secret_scanning_enabled_for_new_repositories
- name: secret_scanning_push_protection_custom_link
- name: secret_scanning_push_protection_custom_link_enabled
- name: secret_scanning_push_protection_enabled_for_new_repositories
- name: total_private_repos
- name: twitter_username
- name: two_factor_requirement_enabled
- name: type
- name: updated_at
- name: url
- name: web_commit_signoff_required
- name: issues
description: "Simulated issue data from the Github connector."
columns:
- name: id
description: "Unique identifier for the issues."
- name: active_lock_reason
- name: assignee
- name: assignees
- name: author_association
- name: body
- name: closed_at
- name: comments
- name: comments_url
- name: created_at
- name: draft
- name: events_url
- name: html_url
- name: labels
- name: labels_url
- name: locked
- name: milestone
- name: node_id
- name: performed_via_github_app
- name: pull_request
- name: reactions
- name: repository
- name: repository_url
- name: state
- name: state_reason
- name: timeline_url
- name: title
- name: updated_at
- name: url
- name: user
- name: user_id
- name: commits
description: "Simulated commit data from the Github connector."
columns:
- name: author
- name: branch
- name: comments_url
- name: commit
- name: committer
- name: created_at
- name: html_url
- name: node_id
- name: parents
- name: repository
- name: sha
- name: url
- name: comments
description: "Simulated comment data from the Github connector."
columns:
- name: id
description: "Unique identifier for the comments."
- name: author_association
- name: body
- name: created_at
- name: html_url
- name: issue_url
- name: node_id
- name: performed_via_github_app
- name: reactions
- name: repository
- name: updated_at
- name: url
- name: user
- name: user_id
- name: branches
description: "Simulated branch data from the Github connector."
columns:
- name: commit
- name: name
- name: protected
- name: protection
- name: protection_url
- name: repository
- name: collaborators
description: "Simulated collaborators data from the Github connector."
columns:
- name: id
description: "Unique identifier for the collaborators."
- name: avatar_url
- name: events_url
- name: followers_url
- name: following_url
- name: gists_url
- name: gravatar_id
- name: html_url
- name: login
- name: node_id
- name: organizations_url
- name: permissions
- name: received_events_url
- name: repos_url
- name: repository
- name: role_name
- name: site_admin
- name: starred_url
- name: subscriptions_url
- name: type
- name: url
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_branches.sql
================================================
-- Staging model: unmodified pass-through of the raw `branches` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'branches') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_collaborators.sql
================================================
-- Staging model: unmodified pass-through of the raw `collaborators` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'collaborators') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_comments.sql
================================================
-- Staging model: unmodified pass-through of the raw `comments` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'comments') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_commits.sql
================================================
-- Staging model: unmodified pass-through of the raw `commits` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'commits') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_issues.sql
================================================
-- Staging model: unmodified pass-through of the raw `issues` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'issues') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_organizations.sql
================================================
-- Staging model: unmodified pass-through of the raw `organizations` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'organizations') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_pull_requests.sql
================================================
-- Staging model: unmodified pass-through of the raw `pull_requests` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'pull_requests') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_repositories.sql
================================================
-- Staging model: unmodified pass-through of the raw `repositories` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'repositories') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_review_comments.sql
================================================
-- Staging model: unmodified pass-through of the raw `review_comments` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'review_comments') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_reviews.sql
================================================
-- Staging model: unmodified pass-through of the raw `reviews` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'reviews') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_stargazers.sql
================================================
-- Staging model: unmodified pass-through of the raw `stargazers` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'stargazers') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_tags.sql
================================================
-- Staging model: unmodified pass-through of the raw `tags` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'tags') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_teams.sql
================================================
-- Staging model: unmodified pass-through of the raw `teams` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'teams') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/models/staging/stg_users.sql
================================================
-- Staging model: unmodified pass-through of the raw `users` stream
-- loaded into BigQuery by the Airbyte Github connector.
select
    *
from {{ source('github', 'users') }}
================================================
FILE: developer_productivity_analytics_github/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
dataset: transformed_data
job_execution_timeout_seconds: 300
job_retries: 1
# Use an env variable to indicate your JSON key file path
keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
location: US
method: service-account
priority: interactive
# Indicate your BigQuery project ID
project: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
threads: 1
type: bigquery
target: dev
================================================
FILE: developer_productivity_analytics_github/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: developer_productivity_analytics_github/dbt_project/tests/.gitkeep
================================================
================================================
FILE: developer_productivity_analytics_github/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: developer_productivity_analytics_github/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:0LmuAc5LvlMuOUPtNEaCAh9FHrV/C877bDJhm9Lz8MU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: developer_productivity_analytics_github/infra/airbyte/main.tf
================================================
// source
// Github source connector: authenticates with a personal access token and
// syncs the configured repository from the fixed start_date below.
resource "airbyte_source_github" "my_source_github" {
  configuration = {
    credentials = {
      source_github_authentication_personal_access_token = {
        personal_access_token = var.personal_access_token
      }
    }
    repository  = var.repository
    source_type = "github"
    // Only records created/updated after this timestamp are synced.
    start_date = "2023-09-01T00:00:00Z"
  }
  // NOTE(review): placeholder display name — replace with a meaningful name.
  name         = "your_name"
  workspace_id = var.workspace_id
}
// destination
// BigQuery destination: loads the synced data into var.dataset_id in the
// US location using plain standard inserts (no GCS staging bucket needed).
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.dataset_id
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    // Full service-account key JSON, passed as a string.
    credentials_json = var.credentials_json
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}
// connection
// Wires the Github source to the BigQuery destination and enables the 14
// streams that the dbt staging models downstream expect to find.
resource "airbyte_connection" "github_bigquery" {
  name           = "Github to bigquery"
  source_id      = airbyte_source_github.my_source_github.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [
      {
        name = "users"
      },
      {
        name = "teams"
      },
      {
        name = "tags"
      },
      {
        name = "stargazers"
      },
      {
        name = "repositories"
      },
      {
        name = "pull_requests"
      },
      {
        name = "organizations"
      },
      {
        name = "issues"
      },
      {
        name = "commits"
      },
      {
        name = "comments"
      },
      {
        name = "branches"
      },
      {
        name = "reviews"
      },
      {
        name = "review_comments"
      },
      {
        name = "collaborators"
      }
    ]
  }
}
================================================
FILE: developer_productivity_analytics_github/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source = "airbytehq/airbyte"
      // Pinned to the exact version recorded in .terraform.lock.hcl.
      version = "0.3.3"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1/"
}
================================================
FILE: developer_productivity_analytics_github/infra/airbyte/variables.tf
================================================
// Input variables for the Github -> BigQuery Airbyte resources.
// Secrets are marked `sensitive` so Terraform redacts them from
// plan/apply output and state diffs shown on the CLI.
variable "personal_access_token" {
  description = "Github personal access token used by the Airbyte source."
  type        = string
  sensitive   = true
}

variable "project_id" {
  description = "Google Cloud project ID hosting the BigQuery destination."
  type        = string
}

variable "workspace_id" {
  description = "Airbyte workspace ID (visible in the Airbyte webapp URL)."
  type        = string
}

variable "dataset_id" {
  description = "BigQuery dataset that Airbyte loads raw data into."
  type        = string
}

variable "credentials_json" {
  description = "Service-account key JSON for BigQuery, passed as a string."
  type        = string
  sensitive   = true
}

variable "repository" {
  description = "Github repository to sync, e.g. `owner/repo`."
  type        = string
}
================================================
FILE: developer_productivity_analytics_github/orchestration/orchestration/__init__.py
================================================
================================================
FILE: developer_productivity_analytics_github/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path
@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize every dbt resource in the project by running `dbt build`.

    Streams each dbt event back to Dagster as it completes so asset
    materializations show up incrementally in the UI.
    """
    yield from dbt.cli(["build"], context=context).stream()
# Connection details for the local Airbyte OSS instance. Credentials are
# read from the environment so nothing secret is hard-coded; the defaults
# match a stock local docker-compose deployment.
airbyte_instance = AirbyteResource(
    host=os.getenv("AIRBYTE_HOST", "localhost"),
    port=os.getenv("AIRBYTE_PORT", "8000"),
    # If using basic auth, include username and password:
    username=os.getenv("AIRBYTE_USERNAME", "airbyte"),
    password=os.getenv("AIRBYTE_PASSWORD", "password"),
)

# Load every connection in the Airbyte workspace as Dagster assets so the
# syncs appear in the global asset lineage alongside the dbt models.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance)
================================================
FILE: developer_productivity_analytics_github/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource

# Absolute path to the dbt project that sits next to this orchestration
# package (three levels up from this file, then into dbt_project/).
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # Parse the project now and point at the freshly generated manifest.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    # Reuse a manifest produced by a prior `dbt parse`/`dbt build`.
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: developer_productivity_analytics_github/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource

from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# Code-location entry point: registers the Airbyte and dbt assets, the
# (currently empty) schedules, and the dbt CLI resource the dbt assets use.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        # dbt CLI resource pointed at the dbt project resolved in constants.py.
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: developer_productivity_analytics_github/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
schedules = [
# build_schedule_from_dbt_selection(
# [dbt_project_dbt_assets],
# job_name="materialize_dbt_models",
# cron_schedule="0 0 * * *",
# dbt_select="fqn:*",
# ),
]
================================================
FILE: developer_productivity_analytics_github/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: developer_productivity_analytics_github/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Packaging for the Dagster orchestration code location.
setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    # Runtime deps: Dagster core/cloud, the dbt integration, and the dbt
    # adapter needed to run the BigQuery dbt project.
    install_requires=[
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dbt-core>=1.4.0",
        "dbt-bigquery",
    ],
    # `pip install -e ".[dev]"` additionally installs the local Dagster UI.
    extras_require={
        "dev": [
            "dagster-webserver",
        ]
    },
)
================================================
FILE: developer_productivity_analytics_github/setup.py
================================================
from setuptools import find_packages, setup

# Top-level packaging for the Airbyte + dbt + Dagster quickstart.
setup(
    name="airbyte-dbt-dagster",
    # Explicit version: without it setuptools warns and falls back to 0.0.0.
    version="0.0.1",
    packages=find_packages(),
    # Runtime deps: the BigQuery dbt adapter plus Dagster and its Airbyte
    # and dbt integrations.
    install_requires=[
        "dbt-bigquery",
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dagster-airbyte",
    ],
    # `pip install -e ".[dev]"` additionally installs the Dagster UI and pytest.
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: ecommerce_analytics_bigquery/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: ecommerce_analytics_bigquery/README.md
================================================
# E-commerce Analytics Stack with Airbyte, dbt, Dagster and BigQuery
Below is a visual representation of how data flows through our integrated tools in this Quickstart. This comes from Dagster's global asset lineage view:

## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors](#3-setting-up-airbyte-connectors)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#5-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte locally. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform (Optional)**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli). This is an optional step because you can also create and manage Airbyte resources via the UI.
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add ecommerce_analytics_bigquery
```
2. **Navigate to the directory**:
```bash
cd ecommerce_analytics_bigquery
```
At this point you can view the code in your preferred IDE. For example, if you’re using Visual Studio Code, you can execute `code .` to open the code.
3. **Set Up a Virtual Environment**:
You can use the following commands, just make sure to adapt to your specific python installation.
- For Linux and Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Setting Up Airbyte Connectors
To set up your Airbyte connectors, you can choose to do it via Terraform, or the UI. Choose one of the two following options.
### 3.1. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations via Terraform, facilitating data synchronization between various platforms. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs:
- Provide credentials for your BigQuery connection in the `main.tf` file.
- `dataset_id`: The name of the BigQuery dataset where Airbyte will load data. In this case, enter “raw_data”.
- `project_id`: Your BigQuery project ID.
- `credentials_json`: The contents of the service account JSON file. You should input a string, so you need to convert the JSON content to string beforehand.
- `workspace_id`: Your Airbyte workspace ID, which can be found in the webapp url. For example, in this url: http://localhost:8000/workspaces/910ab70f-0a67-4d25-a983-999e99e1e395/ the workspace id would be `910ab70f-0a67-4d25-a983-999e99e1e395`.
- Alternatively, you can utilize the `variables.tf` file to manage these credentials:
- You’ll be prompted to enter the credentials when you execute `terraform plan` and `terraform apply`. If going for this option, just move to the next step. If you don’t want to use variables, remove them from the file.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go 🎉.
### 3.2. Setting Up Airbyte Connectors Using the UI
Start by launching the Airbyte UI by going to http://localhost:8000/ in your browser. Then:
1. **Create a source**:
- Go to the Sources tab and click on `+ New source`.
- Search for “faker” using the search bar and select `Sample Data (Faker)`.
- Adjust the Count and optional fields as needed for your use case. You can also leave as is.
- Click on `Set up source`.
2. **Create a destination**:
- Go to the Destinations tab and click on `+ New destination`.
- Search for “bigquery” using the search bar and select `BigQuery`.
- Enter the connection details as needed.
- For simplicity, you can use `Standard Inserts` as the loading method.
- In the `Service Account Key JSON` field, enter the contents of the JSON file. Yes, the full JSON.
- Click on `Set up destination`.
3. **Create a connection**:
- Go to the Connections tab and click on `+ New connection`.
- Select the source and destination you just created.
- Enter the connection details as needed.
- Click on `Set up connection`.
That’s it! Your connection is set up and ready to go! 🎉
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Move to the directory containing the dbt configuration:
```bash
cd ../../dbt_project
```
2. **Update Connection Details**:
- You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details. Specifically, you need to update the Service Account JSON file path and your BigQuery project ID.
- Provide your BigQuery project ID in the `database` field of the `dbt_project/models/sources/faker_sources.yml` file.
If you want to avoid hardcoding credentials in the `profiles.yml` file, you can leverage environment variables. An example of how to use them in this file is provided for the `keyfile` key.
3. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery 🎉.
## 5. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd ../orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on `view global asset lineage` at the top right corner of the Dagster UI. This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
5. **Materialize Dagster Assets**:
In the Dagster UI, click on `Materialize all`. This should trigger the full pipeline. First the Airbyte sync to extract data from Faker and load it into BigQuery, and then dbt to transform the raw data, materializing the `staging` and `marts` models.
You can go to the Airbyte UI and confirm a sync is running, and then, once the dbt jobs have run, go to your BigQuery console and check the views have been created in the `transformed_data` dataset.
## Next Steps
Congratulations on deploying and running the E-commerce Analytics Quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Dive into the datasets in BigQuery, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Expand Your Data Sources**
- Add more data sources to Airbyte. Explore different types of sources available, and see how they can enrich your existing datasets and broaden your analytical capabilities.
### 4. **Enhance Data Quality and Testing**
- Implement data quality tests in dbt to ensure the reliability and accuracy of your transformations. Use dbt's testing features to validate your data and catch issues early on.
### 5. **Automate and Monitor Your Pipelines**
- Explore more advanced Dagster configurations and setups to automate your pipelines further and set up monitoring and alerting to be informed of any issues immediately.
### 6. **Scale Your Setup**
- Consider scaling your setup to handle more data, more sources, and more transformations. Optimize your configurations and resources to ensure smooth and efficient processing of larger datasets.
### 7. **Contribute to the Community**
- Share your learnings, optimizations, and new configurations with the community. Contribute to the respective tool’s communities and help others learn and grow.
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
.user.yml
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2

# This setting configures which "profile" dbt uses for this project.
# It must match the top-level key in profiles.yml.
profile: 'dbt_project'

# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]

clean-targets: # directories to be removed by `dbt clean`
  - "target"
  - "dbt_packages"

# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# Build every model under models/staging/ and models/marts/ as a view.
# These settings can be overridden in the individual model files using
# the `{{ config(...) }}` macro.
models:
  dbt_project:
    # The + prefix marks a config key (not a directory); each setting applies
    # to all model files under the matching subdirectory of models/.
    staging:
      +materialized: view
    marts:
      +materialized: view
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/macros/.gitkeep
================================================
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/models/marts/product_popularity.sql
================================================
-- Ranks products by how often they have been purchased.
-- Joins product details onto per-product purchase counts from stg_purchases.
WITH base AS (
    -- Number of purchases recorded for each product
    SELECT
        product_id,
        COUNT(id) AS purchase_count
    FROM {{ ref('stg_purchases') }}
    GROUP BY 1
)

SELECT
    p.id,
    p.make,
    p.model,
    b.purchase_count  -- NULL for products that were never purchased (LEFT JOIN)
FROM {{ ref('stg_products') }} p
LEFT JOIN base b ON p.id = b.product_id
ORDER BY b.purchase_count DESC
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/models/marts/purchase_patterns.sql
================================================
-- Purchase funnel timing: for each purchase, how long elapsed between the
-- item being added to the cart and the purchase completing.
SELECT
    user_id,
    product_id,
    purchased_at,
    added_to_cart_at,
    -- Seconds from cart-add to purchase; NULL when either timestamp is NULL
    TIMESTAMP_DIFF(purchased_at, added_to_cart_at, SECOND) AS time_to_purchase_seconds,
    returned_at
FROM {{ ref('stg_purchases') }}
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/models/marts/user_demographics.sql
================================================
-- User counts and average age per demographic segment
-- (gender x academic_degree x nationality), sourced from stg_users.
SELECT
    gender,
    academic_degree,
    nationality,
    AVG(age) AS average_age,
    COUNT(id) AS user_count
FROM {{ ref('stg_users') }}
GROUP BY
    gender,
    academic_degree,
    nationality
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/models/sources/faker_sources.yml
================================================
version: 2

# Source definitions for the raw tables that Airbyte's Faker connector loads
# into BigQuery. Referenced by the staging models via {{ source('faker', ...) }}.
sources:
  - name: faker
    # Use your BigQuery project ID
    database: your_project_id
    # Use your BigQuery dataset name
    schema: raw_data
    # NOTE(review): the staging models select `_airbyte_extracted_at`, which is
    # not documented below — confirm which Airbyte metadata columns your
    # destination version actually emits.
    tables:
      - name: users
        description: "Simulated user data from the Faker connector."
        columns:
          - name: id
            description: "Unique identifier for the user."
          - name: address
          - name: occupation
          - name: gender
          - name: academic_degree
          - name: weight
          - name: created_at
          - name: language
          - name: telephone
          - name: title
          - name: updated_at
          - name: nationality
          - name: blood_type
          - name: name
          - name: age
          - name: email
          - name: height
          - name: _airbyte_ab_id
          - name: _airbyte_emitted_at
          - name: _airbyte_normalized_at
          - name: _airbyte_users_hashid
      - name: products
        description: "Simulated product data from the Faker connector."
        columns:
          - name: id
            description: "Unique identifier for the product."
          - name: updated_at
          - name: year
          - name: price
          - name: created_at
          - name: model
          - name: make
          - name: _airbyte_ab_id
          - name: _airbyte_emitted_at
          - name: _airbyte_normalized_at
          # Fixed: was `_airbyte_users_hashid`, copy-pasted from the users table.
          # Airbyte's normalization hashid column is named per stream.
          - name: _airbyte_products_hashid
      - name: purchases
        description: "Simulated purchase data from the Faker connector."
        columns:
          - name: id
            description: "Unique identifier for the purchase."
          - name: updated_at
          - name: purchased_at
          - name: user_id
          - name: returned_at
          - name: product_id
          - name: created_at
          - name: added_to_cart_at
          - name: _airbyte_ab_id
          - name: _airbyte_emitted_at
          - name: _airbyte_normalized_at
          # Fixed: was `_airbyte_users_hashid`, copy-pasted from the users table.
          - name: _airbyte_purchases_hashid
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/models/staging/stg_products.sql
================================================
-- Staging view over the raw Airbyte-synced products stream.
-- Passes columns through unchanged for use by the marts models.
-- NOTE(review): `_airbyte_extracted_at` is not among the columns documented in
-- models/sources/faker_sources.yml — confirm the synced schema.
select
    id,
    year,
    price,
    model,
    make,
    created_at,
    updated_at,
    _airbyte_extracted_at,  -- Airbyte sync metadata; trailing comma is valid BigQuery SQL
from {{ source('faker', 'products') }}
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/models/staging/stg_purchases.sql
================================================
-- Staging view over the raw Airbyte-synced purchases stream.
-- Passes columns through unchanged for use by the marts models.
select
    id,
    user_id,     -- joins to stg_users.id in the marts layer
    product_id,  -- joins to stg_products.id in the marts layer
    updated_at,
    purchased_at,
    returned_at,
    created_at,
    added_to_cart_at,
    _airbyte_extracted_at,  -- Airbyte sync metadata; trailing comma is valid BigQuery SQL
from {{ source('faker', 'purchases') }}
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/models/staging/stg_users.sql
================================================
-- Staging view over the raw Airbyte-synced users stream.
-- Passes columns through unchanged for use by the marts models.
select
    id,
    gender,
    academic_degree,
    title,
    nationality,
    age,
    name,
    email,
    created_at,
    updated_at,
    _airbyte_extracted_at,  -- Airbyte sync metadata; trailing comma is valid BigQuery SQL
from {{ source('faker', 'users') }}
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/profiles.yml
================================================
# dbt connection profile for this project (name must match `profile` in
# dbt_project.yml). Defines a single BigQuery target, `dev`.
dbt_project:
  outputs:
    dev:
      # Dataset where dbt materializes the staging and marts views
      dataset: transformed_data
      job_execution_timeout_seconds: 300
      job_retries: 1
      # Use an env variable to indicate your JSON key file path
      keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
      location: US
      # Authenticate with a service-account JSON key (see README for setup)
      method: service-account
      priority: interactive
      # Indicate your BigQuery project ID
      project: your_project_id
      threads: 1
      type: bigquery
  target: dev
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: ecommerce_analytics_bigquery/dbt_project/tests/.gitkeep
================================================
================================================
FILE: ecommerce_analytics_bigquery/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: ecommerce_analytics_bigquery/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:a6g5uWP/pt1/popVNlKwnTssWNfdYY4KVFPMisN/yvU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: ecommerce_analytics_bigquery/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs

// Sources

// Faker demo source: generates a seeded batch of fake e-commerce records.
resource "airbyte_source_faker" "faker" {
  configuration = {
    always_updated    = false
    count             = 1000 // number of records to generate per stream (see Faker source docs)
    parallelism       = 9
    records_per_slice = 10
    seed              = 6 // fixed seed keeps the generated data reproducible across syncs
    source_type       = "faker"
  }
  name         = "Faker"
  workspace_id = var.workspace_id
}

// Destinations

// BigQuery destination: loads raw data into var.dataset_id using the
// "Standard" (inserts) loading method.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.dataset_id
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    credentials_json = var.credentials_json
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}

// Connections

// Syncs the three Faker streams (users, products, purchases) into BigQuery.
resource "airbyte_connection" "faker_to_bigquery" {
  name           = "Faker to BigQuery"
  source_id      = airbyte_source_faker.faker.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [
      {
        name = "users"
      },
      {
        name = "products"
      },
      {
        name = "purchases"
      },
    ]
  }
}
================================================
FILE: ecommerce_analytics_bigquery/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.3"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  // NOTE(review): these are the quickstart's local defaults — never commit
  // real credentials here; prefer provider env vars for anything non-local.
  username = "airbyte"
  password = "password"

  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: ecommerce_analytics_bigquery/infra/airbyte/variables.tf
================================================
// Input variables for the Airbyte source/destination/connection resources
// defined in main.tf. Values are supplied via terraform.tfvars (git-ignored).

variable "workspace_id" {
  type        = string
  description = "Airbyte workspace ID that owns the source, destination and connection."
}

variable "dataset_id" {
  type        = string
  description = "BigQuery dataset ID that Airbyte loads raw data into (e.g. raw_data)."
}

variable "project_id" {
  type        = string
  description = "Google Cloud project ID hosting the BigQuery datasets."
}

variable "credentials_json" {
  type        = string
  description = "Service-account key (JSON) Airbyte uses to write to BigQuery."
  // Marked sensitive so Terraform redacts the key in plan/apply output.
  sensitive   = true
}
================================================
FILE: ecommerce_analytics_bigquery/orchestration/orchestration/__init__.py
================================================
================================================
FILE: ecommerce_analytics_bigquery/orchestration/orchestration/assets.py
================================================
import os

from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance

from .constants import dbt_manifest_path


@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize every model in the dbt project by streaming `dbt build` events."""
    yield from dbt.cli(["build"], context=context).stream()


# Local Airbyte OSS instance; assets below are derived from its configured connections.
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password:
    username="airbyte",
    # NOTE(review): no default is supplied — AIRBYTE_PASSWORD must be exported
    # before launching Dagster (see the project README).
    password=os.getenv("AIRBYTE_PASSWORD")
)

# One Dagster asset per Airbyte stream, namespaced under the "faker" key prefix.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance, key_prefix="faker")
================================================
FILE: ecommerce_analytics_bigquery/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path

from dagster_dbt import DbtCliResource

# Absolute path to the dbt project that sits alongside this orchestration package.
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # `dbt parse` regenerates manifest.json under the invocation's target path.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: ecommerce_analytics_bigquery/orchestration/orchestration/definitions.py
================================================
import os

from dagster import Definitions
from dagster_dbt import DbtCliResource

from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# Dagster code-location definition: registers the dbt and Airbyte assets,
# any schedules, and the dbt CLI resource required by the dbt assets.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        # Resource key "dbt" matches the `dbt` parameter of dbt_project_dbt_assets.
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: ecommerce_analytics_bigquery/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
schedules = [
# build_schedule_from_dbt_selection(
# [dbt_project_dbt_assets],
# job_name="materialize_dbt_models",
# cron_schedule="0 0 * * *",
# dbt_select="fqn:*",
# ),
]
================================================
FILE: ecommerce_analytics_bigquery/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: ecommerce_analytics_bigquery/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Packaging metadata for the Dagster orchestration package.
setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dbt-core>=1.4.0",
        "dbt-bigquery",
    ],
    extras_require={
        # `pip install -e ".[dev]"` additionally installs the local Dagster UI.
        "dev": [
            "dagster-webserver",
        ]
    },
)
================================================
FILE: ecommerce_analytics_bigquery/setup.py
================================================
from setuptools import find_packages, setup

# Packaging metadata for the quickstart: installs dbt (BigQuery adapter) and
# the Dagster integrations used by the orchestration code.
setup(
    name="airbyte-dbt-dagster",
    packages=find_packages(),
    install_requires=[
        "dbt-bigquery",
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dagster-airbyte",
    ],
    # `pip install -e ".[dev]"` additionally installs the Dagster UI and pytest.
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: elt_simplified_stack/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: elt_simplified_stack/README.md
================================================
# ELT simplified Stack With Github, Airbyte, DBT, Prefect and BigQuery
Welcome to the "ELT simplified Stack" repository! ✨ This stack moves data from source to destination, with transformations along the way, using Airbyte, GitHub, dbt, BigQuery, and Prefect. With this setup, you can extract GitHub data with Airbyte, load it into BigQuery, and transform and orchestrate it using dbt and Prefect.
This Quickstart is all about making things easy, getting you started quickly and showing you how smoothly all these tools can work together!
## Table of Contents
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors with Terraform](#3-setting-up-airbyte-connectors-with-terraform)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Prefect](#5-orchestrating-with-prefect)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add elt_simplified_stack
```
2. **Navigate to the directory**:
```bash
cd elt_simplified_stack
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your BigQuery connection. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset.
## 5. Orchestrating with Prefect
[Prefect](https://prefect.io/) is an orchestration workflow tool that makes it easy to build, run, and monitor data workflows by writing Python code. In this section, we'll walk you through creating a Prefect flow to orchestrate both Airbyte extract and load operations, and dbt transformations with Python:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Prefect orchestration configurations:
```bash
cd ../orchestration
```
2. **Set Environment Variables**:
Prefect requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export AIRBYTE_PASSWORD=password
```
3. **Connect to Prefect's API**:
Open a new terminal window. Start a local Prefect server instance in your virtual environment:
```bash
prefect server start
```
4. **Deploy the Flow**:
When we run the flow script, Prefect will automatically create a flow deployment that you can interact with via the UI and API. The script will stay running so that it can listen for scheduled or triggered runs of this flow; once a run is found, it will be executed within a subprocess.
```bash
python my_elt_flow.py
```
5. **Access Prefect UI in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:4200
```
You can now begin interacting with your newly created deployment!
## Next Steps
Congratulations on deploying and running the elt_simplified quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Your raw data extracted via Airbyte can be represented as sources in dbt. Start by [creating new dbt sources](https://docs.getdbt.com/docs/build/sources) to represent this data, allowing for structured transformations down the line.
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Extend the Prefect Pipeline**:
- You can create flow runs from this deployment via API calls to be triggered by new data sync in Airbyte rather than on a schedule. You can customize your dbt runs based on the results got from AirbyteSyncResult. You can also migrate the deployment to the Prefect cloud.
### 4. **Extend the Project**:
- The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or enhance your transformation logic – the floor is yours. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: elt_simplified_stack/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: elt_simplified_stack/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
staging:
+materialized: view
marts:
+materialized: view
================================================
FILE: elt_simplified_stack/dbt_project/models/marts/commits-per-repo.sql
================================================
-- Commits per developer per repository.
-- Uses ref() instead of the hard-coded `transformed_data.stg_commits` so dbt
-- resolves the staging model's database/schema per target and records lineage.
SELECT
    JSON_EXTRACT_SCALAR(author, '$.login') AS developer_username,
    repository,
    COUNT(*) AS num_commits
FROM
    {{ ref('stg_commits') }}
GROUP BY
    developer_username, repository
================================================
FILE: elt_simplified_stack/dbt_project/models/marts/pr-per-dev.sql
================================================
-- Number of pull requests opened by each developer.
-- Uses ref() instead of the hard-coded `transformed_data.stg_pull_requests`
-- so dbt resolves the staging model per target and records lineage.
SELECT
    JSON_EXTRACT_SCALAR(user, '$.login') AS developer_username,
    COUNT(*) AS num_pull_requests_opened
FROM
    {{ ref('stg_pull_requests') }}
GROUP BY
    developer_username
================================================
FILE: elt_simplified_stack/dbt_project/models/marts/pr-per-status.sql
================================================
-- Open vs. closed pull request counts per developer.
-- Fixes: the GitHub API reports pull request state as 'open' (not 'opened'),
-- so the original `state = 'opened'` comparison always summed to zero.
-- Also uses ref() instead of a hard-coded schema for proper dbt lineage.
SELECT
    JSON_EXTRACT_SCALAR(user, '$.login') AS username,
    SUM(CASE WHEN state = 'open' THEN 1 ELSE 0 END) AS opened_prs,
    SUM(CASE WHEN state = 'closed' THEN 1 ELSE 0 END) AS closed_prs
FROM {{ ref('stg_pull_requests') }}
GROUP BY username
================================================
FILE: elt_simplified_stack/dbt_project/models/sources/github_source.yml
================================================
version: 2
sources:
- name: github
# Use your BigQuery project ID
database: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
# Use your BigQuery dataset name
schema: github_airbyte
tables:
- name: pull_requests
description: "Simulated pull_request data from the Github connector."
columns:
- name: id
description: "Unique identifier for the pull_requests."
- name: active_lock_reason
- name: assignee
- name: assignees
- name: author_association
- name: auto_merge
- name: base
- name: body
- name: closed_at
- name: comments_url
- name: commits_url
- name: created_at
- name: diff_url
- name: draft
- name: head
- name: html_url
- name: issue_url
- name: labels
- name: locked
- name: merge_commit_sha
- name: merged_at
- name: milestone
- name: node_id
- name: number
- name: patch_url
- name: repository
- name: requested_reviewers
- name: requested_teams
- name: review_comment_url
- name: review_comments_url
- name: state
- name: statuses_url
- name: title
- name: updated_at
- name: url
- name: user
- name: commits
description: "Simulated commit data from the Github connector."
columns:
- name: author
- name: branch
- name: comments_url
- name: commit
- name: committer
- name: created_at
- name: html_url
- name: node_id
- name: parents
- name: repository
- name: sha
- name: url
================================================
FILE: elt_simplified_stack/dbt_project/models/staging/stg_commits.sql
================================================
-- Staging model: pass-through of the raw GitHub `commits` stream loaded by Airbyte.
select *
from {{ source('github', 'commits') }}
================================================
FILE: elt_simplified_stack/dbt_project/models/staging/stg_pull_requests.sql
================================================
-- Staging model: pass-through of the raw GitHub `pull_requests` stream loaded by Airbyte.
select *
from {{ source('github', 'pull_requests') }}
================================================
FILE: elt_simplified_stack/dbt_project/profiles.yml
================================================
# dbt connection profile for the BigQuery target used by this quickstart.
dbt_project:
  outputs:
    dev:
      # Dataset where dbt materializes the staging/marts models.
      dataset: transformed_data
      job_execution_timeout_seconds: 300
      job_retries: 1
      # Use an env variable to indicate your JSON key file path
      keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
      location: US
      # Authenticates with the service-account JSON key above.
      method: service-account
      priority: interactive
      # Indicate your BigQuery project ID
      project: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
      threads: 1
      type: bigquery
  target: dev
================================================
FILE: elt_simplified_stack/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: elt_simplified_stack/dbt_project/tests/.gitkeep
================================================
================================================
FILE: elt_simplified_stack/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: elt_simplified_stack/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:0LmuAc5LvlMuOUPtNEaCAh9FHrV/C877bDJhm9Lz8MU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: elt_simplified_stack/infra/airbyte/main.tf
================================================
// source
// GitHub source connector: authenticates with a personal access token and
// ingests the repository named in var.repository.
resource "airbyte_source_github" "my_source_github" {
  configuration = {
    credentials = {
      source_github_authentication_personal_access_token = {
        personal_access_token = var.personal_access_token
      }
    }
    repository  = var.repository
    source_type = "github"
    // NOTE(review): start_date is hard-coded; move it back to cover the
    // earliest history you want Airbyte to sync.
    start_date = "2023-09-01T00:00:00Z"
  }
  // Placeholder display name shown in the Airbyte UI.
  name         = "your_name"
  workspace_id = var.workspace_id
}

// destination
// BigQuery destination using standard (streaming-free) inserts.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.dataset_id
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    credentials_json = var.credentials_json
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}

// connection
// Syncs the two streams the dbt models consume (pull_requests, commits)
// from the GitHub source into the BigQuery destination.
resource "airbyte_connection" "github_bigquery" {
  name           = "Github to bigquery"
  source_id      = airbyte_source_github.my_source_github.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [
      {
        name = "pull_requests"
      },
      {
        name = "commits"
      }
    ]
  }
}
================================================
FILE: elt_simplified_stack/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.3"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  // NOTE(review): these are the Airbyte OSS defaults — change them if your
  // local proxy credentials differ.
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1/"
}
================================================
FILE: elt_simplified_stack/infra/airbyte/variables.tf
================================================
// Input variables for the Airbyte GitHub -> BigQuery setup.
// Secrets are marked `sensitive` so Terraform redacts them from plan/apply output.

variable "personal_access_token" {
  description = "GitHub personal access token used by the Airbyte source."
  type        = string
  sensitive   = true
}

variable "project_id" {
  description = "GCP project ID hosting the BigQuery destination."
  type        = string
}

variable "workspace_id" {
  description = "Airbyte workspace ID where resources are created."
  type        = string
}

variable "dataset_id" {
  description = "BigQuery dataset Airbyte loads raw data into."
  type        = string
}

variable "credentials_json" {
  description = "Service-account JSON key used by the BigQuery destination."
  type        = string
  sensitive   = true
}

variable "repository" {
  description = "GitHub repository to sync (owner/name)."
  type        = string
}
================================================
FILE: elt_simplified_stack/orchestration/my_elt_flow.py
================================================
import os
from prefect import flow, task
from prefect_airbyte.server import AirbyteServer
from prefect_airbyte.connections import AirbyteConnection, AirbyteSyncResult
from prefect_airbyte.flows import run_connection_sync
from prefect_dbt.cli.commands import DbtCoreOperation
# Connection settings for a locally-running Airbyte instance (Airbyte OSS
# defaults: username "airbyte", password read from AIRBYTE_PASSWORD).
remote_airbyte_server = AirbyteServer(
    username="airbyte",
    password=os.getenv("AIRBYTE_PASSWORD"),
    server_host="localhost",
    server_port="8000"
)
# Persist the server as a reusable Prefect block so it can be loaded by name.
remote_airbyte_server.save("my-remote-airbyte-server", overwrite=True)

# Handle to the Airbyte connection this flow syncs. Replace the placeholder
# with the connection ID from the Airbyte UI/API before running.
airbyte_connection = AirbyteConnection(
    airbyte_server=remote_airbyte_server,
    connection_id="...my_airbyte_connection_id...",
    status_updates=True,
)
@task(name="Extract, Load with Airbyte")
def run_airbyte_sync(connection: AirbyteConnection) -> AirbyteSyncResult:
    """Trigger a sync on *connection* and block until it completes.

    Returns the AirbyteSyncResult for the finished job so downstream dbt
    tasks can depend on (and inspect) the sync outcome.
    """
    job_run = connection.trigger()
    job_run.wait_for_completion()
    return job_run.fetch_result()
def run_dbt_commands(commands, prev_task_result):
    """Construct a DbtCoreOperation that runs *commands* in ../dbt_project.

    The operation is not executed here; callers wrap this helper in a
    Prefect task and invoke .run() on the returned object.
    """
    dbt_task = DbtCoreOperation(
        commands=commands,
        project_dir="../dbt_project",
        profiles_dir="../dbt_project",
        # NOTE(review): `wait_for` is normally a Prefect task-submission
        # option, not a documented DbtCoreOperation field — confirm it is
        # accepted here and actually enforces ordering.
        wait_for=prev_task_result
    )
    return dbt_task
@flow(log_prints=True)
def my_elt_flow():
    """ELT pipeline: Airbyte sync, then dbt prechecks, then `dbt run`."""
    # run Airbyte sync
    # airbyte_sync_result: AirbyteSyncResult = run_connection_sync(
    #     airbyte_connection=airbyte_connection,
    # )
    airbyte_sync_result = run_airbyte_sync(airbyte_connection)

    # run dbt precheck
    # task(name=...)(run_dbt_commands)(...) wraps the helper as an ad-hoc
    # Prefect task so each dbt stage appears separately in the Prefect UI.
    dbt_init_task = task(name="dbt Precheck")(run_dbt_commands)(
        commands=["pwd", "dbt debug", "dbt list"],
        prev_task_result=airbyte_sync_result
    )
    dbt_init_task.run()

    # run dbt models
    dbt_run_task = task(name="Transform with dbt")(run_dbt_commands)(
        commands=["dbt run"],
        prev_task_result=dbt_init_task
    )
    dbt_run_task.run()
# Run the flow once when executed as a script.
if __name__ == "__main__":
    # my_elt_flow.visualize()
    my_elt_flow()
================================================
FILE: elt_simplified_stack/setup.py
================================================
from setuptools import find_packages, setup

# Packaging metadata for the elt-simplified quickstart.
# Fix: the original was missing the comma after `name=...`, which made this
# file a SyntaxError and the package uninstallable.
setup(
    name="elt-simplified",
    packages=find_packages(),
    install_requires=[
        "prefect",
        "prefect-airbyte",
        "prefect-dbt",
        "dbt-core>=1.4.0",
        "dbt-bigquery",
    ],
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: error_analysis_stack_sentry/Readme.md
================================================
# Optimizing error resolution with Sentry, Airbyte, dbt, dagster and Snowflake
Welcome to the "Optimizing Error Resolution Processes with Sentry Stack" repository. This quickstart guide is designed to assist you in configuring an error analysis stack utilizing Sentry, Airbyte, Snowflake, dbt, and Dagster. Within this framework, error data extracted from Sentry is ingested into Snowflake through the use of Airbyte. Subsequently, data transformations are performed using dbt, and the results can be visually presented through Dagster.
Please find below the detailed steps for setting up the quickstart.
## Table of Contents
- [Data flow Diagram](#data-flow-diagram)
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#setting-an-environment-for-your-project)
- There are two ways to set up the Airbyte connectors:
- [1. Using Airbyte UI](#1-using-airbyte-ui)
- [2. Using Terraform to Setup the Connector](#2-using-terraform-to-setup-the-connector)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#5-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Data Flow Diagram

## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
5. **Snowflake account creation**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in Snowflake. A step-by-step guide is provided [below](#2-setting-up-Snowflake).
## Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add error_analysis_stack_sentry
```
2. **Navigate to the directory**:
```bash
cd error_analysis_stack_sentry
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 1. Using Airbyte UI
To establish the connection and import data from Sentry into the Snowflake warehouse, use the Airbyte user interface and follow these steps:
1. Run the Airbyte OSS version by following the [documentation](https://docs.airbyte.com/quickstart/deploy-airbyte).
2. Setup the Sentry as source by following [these steps](https://docs.airbyte.com/integrations/sources/sentry).
3. Setup the Snowflake as destination by following [these steps](https://docs.airbyte.com/integrations/destinations/snowflake)
4. Please proceed to configure the synchronization time and select the specific tables you wish to load into Snowflake from Sentry. You can make your selection from the list of available streams.
5. Enjoy :smile:, your data loaded into Snowflake data warehouse from Sentry.
## 2. Using Terraform to Setup the Connector
Airbyte enables you to make connections between different platforms by creating connectors for sources and destinations. In this project, we're using Terraform to automate the setup of these connectors and their connections. Here's how you can do it:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Sentry and Snowflake connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
After Terraform finishes its tasks, go to the Airbyte user interface. You will find your source and destination connectors already set up, along with the connection between them, all ready to use.
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, Snowflake. Here’s a step-by-step guide to help you set this up:
First set up the dbt into your local machine by following steps from this [Link](https://docs.getdbt.com/docs/core/installation).
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your Snowflake connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your Snowflake instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to Snowflake.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to Snowflake and checking the dataset name that you provided.
## 5. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
5. **Materialize Dagster Assets**:
In the Dagster UI, click on "Materialize all". This should trigger the full pipeline. First the Airbyte sync to extract data from Sentry and load it into Snowflake, and then dbt to transform the raw data, materializing the `staging` and `marts` models.
## Next Steps
Congratulations on deploying and running the Error analysis Quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### **Explore the Data and Insights**
- Dive into the datasets in Snowflake, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
================================================
FILE: error_analysis_stack_sentry/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
.user.yml
================================================
FILE: error_analysis_stack_sentry/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: error_analysis_stack_sentry/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: error_analysis_stack_sentry/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_sentry_to_snowflake'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_sentry_to_snowflake'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
target-path: "target"
clean-targets:
- "target"
- "dbt_modules"
- "logs"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_sentry_to_snowflake:
# Config indicated by + and applies to all files under models/example/
example:
+materialized: view
================================================
FILE: error_analysis_stack_sentry/dbt_project/macros/.gitkeep
================================================
================================================
FILE: error_analysis_stack_sentry/dbt_project/models/example/Insight_Table.sql
================================================
/*
Insight table for error analysis: joins each Sentry issue (t1) to its events
(t2) via events.GROUPID, exposing issue metadata, title, severity level and
culprit alongside the event's user payload.
Materialized as a table, overriding the view default in dbt_project.yml.
*/

{{ config(materialized='table') }}

with insight_table as (
    SELECT t1.METADATA as METADATA, t1.TITLE as TITLE, t1.LEVEL as LEVEL, t1.CULPRIT as CULPRIT, t2.USER as USER
    FROM {{ source('snowflake', 'issues') }} AS t1
    INNER JOIN {{ source('snowflake', 'events') }} AS t2
    ON t1.ID = t2.GROUPID
)

select *
from insight_table

/*
NOTE(review): the filter below references `id`, but this model does not
select an `id` column — add t1.ID to the CTE before enabling it.
*/
-- where id is not null
================================================
FILE: error_analysis_stack_sentry/dbt_project/models/example/schema.yml
================================================
version: 2

models:
  - name: Insight_Table
    description: "Sentry issues joined to their events: metadata, title, severity, culprit and the affected user."
    # Fix: the previous version documented and tested an `id` column, but
    # Insight_Table does not select one, so the unique/not_null tests on it
    # failed at `dbt test` time. Document the columns the model actually emits.
    columns:
      - name: METADATA
        description: "Issue metadata payload from the Sentry issues stream."
      - name: TITLE
        description: "Issue title."
      - name: LEVEL
        description: "Severity level of the issue."
      - name: CULPRIT
        description: "Code location Sentry blames for the issue."
      - name: USER
        description: "User payload from the matching event."
================================================
FILE: error_analysis_stack_sentry/dbt_project/models/sources.yml
================================================
# Fix: added the `version: 2` header that dbt expects on property files and
# that the other source definitions in this repo already include.
version: 2

sources:
  - name: snowflake
    # Use your snowflake project ID
    database: sentry_to_snowflake
    # Use your snowflake dataset name
    schema: raw_data
    tables:
      - name: issues
        columns:
          - name: ANNOTATIONS
          - name: ASSIGNEDTO
          - name: COUNT
          - name: CULPRIT
          - name: FIRSTSEEN
          - name: HASSEEN
          - name: ID
          - name: ISBOOKMARKED
          - name: ISPUBLIC
          - name: ISSUBSCRIBED
          - name: LASTSEEN
          - name: LEVEL
          - name: LOGGER
          - name: METADATA
          - name: NUMCOMMENTS
          - name: PERMALINK
          - name: PROJECT
          - name: SHAREID
          - name: SHORTID
          - name: STATS
          - name: STATUS
          - name: STATUSDETAILS
          - name: SUBSCRIPTIONDETAILS
          - name: TITLE
          - name: TYPE
          - name: USERCOUNT
          - name: _AIRBYTE_EXTRACTED_AT
          - name: _AIRBYTE_META
          - name: _AIRBYTE_RAW_ID
      - name: events
        columns:
          - name: DATECREATED
          - name: EVENT.TYPE
          - name: EVENTID
          - name: GROUPID
          - name: ID
          - name: MESSAGE
          - name: PLATFORM
          - name: TAGS
          - name: TITLE
          - name: USER
          - name: _AIRBYTE_EXTRACTED_AT
          - name: _AIRBYTE_META
          - name: _AIRBYTE_RAW_ID
FILE: error_analysis_stack_sentry/dbt_project/profiles.yml
================================================
# dbt connection profile for the Snowflake target.
dbt_sentry_to_snowflake:
  outputs:
    dev:
      account: YOUR_SNOWFLAKE_ACCOUNT_ID
      # User/password auth. The password is read from an environment variable
      # (as the README describes) so the secret stays out of version control;
      # it falls back to the previous literal for backward compatibility.
      user: AIRBYTE_USER
      password: "{{ env_var('SNOWFLAKE_PASSWORD', 'password') }}"
      role: AIRBYTE_ROLE
      database: AIRBYTE_DATABASE
      warehouse: AIRBYTE_WAREHOUSE
      schema: AIRBYTE_SCHEMA
      threads: 1
      client_session_keep_alive: False

      # optional
      connect_retries: 0 # default 0
      connect_timeout: 10 # default: 10
      retry_on_database_errors: False # default: false
      retry_all: False # default: false
      reuse_connections: False # default: false (available v1.4+)
      type: snowflake
  target: dev
================================================
FILE: error_analysis_stack_sentry/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: error_analysis_stack_sentry/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: error_analysis_stack_sentry/dbt_project/tests/.gitkeep
================================================
================================================
FILE: error_analysis_stack_sentry/error_analysis_stack.egg-info/PKG-INFO
================================================
Metadata-Version: 2.1
Name: error-analysis-stack
Version: 0.0.1
Provides-Extra: dev
================================================
FILE: error_analysis_stack_sentry/error_analysis_stack.egg-info/SOURCES.txt
================================================
setup.py
error_analysis_stack.egg-info/PKG-INFO
error_analysis_stack.egg-info/SOURCES.txt
error_analysis_stack.egg-info/dependency_links.txt
error_analysis_stack.egg-info/requires.txt
error_analysis_stack.egg-info/top_level.txt
================================================
FILE: error_analysis_stack_sentry/error_analysis_stack.egg-info/dependency_links.txt
================================================
================================================
FILE: error_analysis_stack_sentry/error_analysis_stack.egg-info/requires.txt
================================================
dagster
dagster-cloud
dagster-airbyte
dagster-dbt
dbt-core>=1.4.0
dbt-snowflake
[dev]
dagster-webserver
pytest
================================================
FILE: error_analysis_stack_sentry/error_analysis_stack.egg-info/top_level.txt
================================================
================================================
FILE: error_analysis_stack_sentry/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: error_analysis_stack_sentry/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.4"
constraints = "0.3.4"
hashes = [
"h1:0AHJKsRTlX6BCJZCJw5/oHsN97zi1AP33JeuPMwoX6U=",
"zh:02167e00f7e89b6f09ae8796b9ee0ac2d8702b5cb295cb27d7a79266ffafe196",
"zh:1ddad39354af090e830caf1e5cce845f24ff0bcef61b73e77ebc7703c2ecf90d",
"zh:223a0a46d354ad0709d5f28d60accb3448ba5f256b84438238fb05235d1e5b34",
"zh:29efd8848b9560456ec3d90f54984670e9d5b7e36f1edd2adb15c5fec3f57166",
"zh:33d31310ba7ec699b5bd64edbb63b0a89bd55d87fae0f55409bbfa5fd7dd4d90",
"zh:35ed0e2894e28ec7762406a18510b789b76b0649ace309eec22acaf10c982f08",
"zh:4ba860918b65c00cc596d0b5b40068b89a72a300604a62bca7d286073779e684",
"zh:59a0d1128477e587d9dac71f93598bae6050d176d29c840b6ad1bf95529d61e8",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8d9bb37e9094eba02acf8d08cf9f3331cd7c26478441d70e74e8d1ec9cb33aaa",
"zh:9e5243eac43950889781a88d4e6186aea240898045e0e3c8fffd3291c5e74b6f",
"zh:a0c31a5bc0cbc4a7341a0d185806a1c6797508580bede71a5009ad7b078d68c2",
"zh:af341259999c6639a1c27e8f116a40b088dd192a3057096dc23a42affc97113f",
"zh:b9779f8f695b4fab56e062abab61eaa58853f20c6411d53b2bd82a66d79a8b49",
"zh:e284d898e5a30e507f1292635542dafe0e95ea8a5a215103a9d96d699aed9e75",
]
}
================================================
FILE: error_analysis_stack_sentry/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources - Documentation of Source : https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs/resources/source_sentry
// Sentry source connector. Fill in the auth token, organization slug and
// project slug before applying.
resource "airbyte_source_sentry" "my_source_sentry" {
  configuration = {
    auth_token = ""
    # discover_fields = [
    #   "{ \"see\": \"documentation\" }",
    # ]
    # hostname = "muted-ingredient.biz"
    organization = ""
    project      = ""
    source_type  = "sentry"
  }
  name = "Sentry Source"
  # secret_id = "...my_secret_id..."
  workspace_id = var.workspace_id
}
// Destinations - Documentation of Destination: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs/resources/destination_snowflake
resource "airbyte_destination_snowflake" "my_destination_snowflake" {
  configuration = {
    credentials = {
      // Username/password authentication; fill in the Snowflake password
      // (see the provider docs for key-pair or OAuth alternatives).
      destination_snowflake_authorization_method_username_and_password = {
        password = ""
      }
    }
    database         = ""
    destination_type = "snowflake"
    // Snowflake account host, e.g. "<account>.snowflakecomputing.com"
    host = ""
    # jdbc_url_params = "...my_jdbc_url_params..."
    # raw_data_schema = "...my_raw_data_schema..."
    role      = ""
    schema    = ""
    username  = ""
    warehouse = ""
  }
  name         = "Snowflake Warehouse"
  workspace_id = var.workspace_id
}
// Connections
// Syncs the Sentry source defined above into the Snowflake destination.
resource "airbyte_connection" "sentry_to_snowflake" {
  name           = "Sentry to Snowflake Data Warehouse"
  source_id      = airbyte_source_sentry.my_source_sentry.source_id
  destination_id = airbyte_destination_snowflake.my_destination_snowflake.destination_id
  status         = "active"
  configurations = {
    // Sentry streams selected for this sync; other streams exposed by the
    // Sentry connector can be added to this list as needed.
    streams = [
      {
        name = "events"
      },
      {
        name = "issues"
      },
      {
        name = "project_detail"
      },
      {
        name = "projects"
      },
      {
        name = "releases"
      }
    ]
    // Overwrites the destination tables on every sync (no incremental state).
    sync_mode = "full_refresh_overwrite"
  }
  schedule = {
    // Empty cron expression — set e.g. "0 0 * * * ?" to schedule syncs,
    // or trigger syncs from the orchestrator instead.
    cron_expression = ""
    schedule_type   = "cron"
  }
}
================================================
FILE: error_analysis_stack_sentry/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below).
  username = "airbyte"
  password = "password"
  // If running locally (Airbyte OSS), include the server url to the airbyte-api-server.
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: error_analysis_stack_sentry/infra/airbyte/variables.tf
================================================
# ID of the Airbyte workspace to create the source, destination and connection
# in (visible in the Airbyte UI URL, or via the workspaces API).
variable "workspace_id" {
  type = string
}
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration/__init__.py
================================================
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path


@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize all dbt models by streaming the events of a `dbt build` run."""
    yield from dbt.cli(["build"], context=context).stream()


# Connection to the local Airbyte OSS instance (docker-compose defaults).
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password:
    username="airbyte",
    # NOTE(review): os.getenv returns None when AIRBYTE_PASSWORD is unset —
    # confirm the resource handles that, or document the required env var.
    password=os.getenv("AIRBYTE_PASSWORD")
)

# One Dagster asset per stream of every connection on the Airbyte instance,
# keyed under the "snowflake" prefix.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance, key_prefix="snowflake")
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource

# Absolute path to the dbt project that lives next to this orchestration package.
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # `manifest={}` presumably lets the parse invocation run without a
    # pre-existing manifest — confirm against the installed dagster-dbt version.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource
from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# Top-level Dagster definitions for this code location: the dbt and Airbyte
# assets, any schedules, and the dbt CLI resource pointed at the dbt project.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection

from .assets import dbt_project_dbt_assets

# No schedules are active by default; uncomment the example below to run all
# dbt models (dbt_select="fqn:*") daily at midnight.
schedules = [
    # build_schedule_from_dbt_selection(
    #     [dbt_project_dbt_assets],
    #     job_name="materialize_dbt_models",
    #     cron_schedule="0 0 * * *",
    #     dbt_select="fqn:*",
    # ),
]
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration.egg-info/PKG-INFO
================================================
Metadata-Version: 2.1
Name: orchestration
Version: 0.0.1
Requires-Dist: dagster
Requires-Dist: dagster-cloud
Requires-Dist: dagster-dbt
Requires-Dist: dbt-core>=1.4.0
Requires-Dist: dbt-snowflake
Provides-Extra: dev
Requires-Dist: dagster-webserver; extra == "dev"
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration.egg-info/SOURCES.txt
================================================
pyproject.toml
setup.py
orchestration/__init__.py
orchestration/assets.py
orchestration/constants.py
orchestration/definitions.py
orchestration/schedules.py
orchestration.egg-info/PKG-INFO
orchestration.egg-info/SOURCES.txt
orchestration.egg-info/dependency_links.txt
orchestration.egg-info/requires.txt
orchestration.egg-info/top_level.txt
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration.egg-info/dependency_links.txt
================================================
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration.egg-info/requires.txt
================================================
dagster
dagster-cloud
dagster-dbt
dbt-core>=1.4.0
dbt-snowflake
[dev]
dagster-webserver
================================================
FILE: error_analysis_stack_sentry/orchestration/orchestration.egg-info/top_level.txt
================================================
orchestration
================================================
FILE: error_analysis_stack_sentry/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: error_analysis_stack_sentry/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Packaging for the Dagster orchestration package.
# NOTE: `dagster-airbyte` is required because orchestration/assets.py imports
# `dagster_airbyte`; it was missing from install_requires, so a fresh
# `pip install` of this package would fail at import time.
setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "dagster",
        "dagster-airbyte",
        "dagster-cloud",
        "dagster-dbt",
        "dbt-core>=1.4.0",
        "dbt-snowflake",
    ],
    extras_require={
        # Development-only extras: the local Dagster UI.
        "dev": [
            "dagster-webserver",
        ]
    },
)
================================================
FILE: error_analysis_stack_sentry/setup.py
================================================
from setuptools import find_packages, setup

# Packaging for the error-analysis quickstart. Declares the Dagster/Airbyte/dbt
# runtime dependencies and a `dev` extra for the local UI and tests.
setup(
    name="error-analysis-stack",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "dagster",
        "dagster-cloud",
        "dagster-airbyte",
        "dagster-dbt",
        "dbt-core>=1.4.0",
        "dbt-snowflake",
    ],
    extras_require={
        # Development-only extras: the local Dagster UI and the test runner.
        "dev": [
            "dagster-webserver",
            "pytest",
        ]
    },
)
================================================
FILE: github_insight_stack/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: github_insight_stack/README.md
================================================
# Github Insight Stack with Airbyte, dbt, Dagster and BigQuery
Welcome to the "GitHub Analytics Stack" repository! 🌟 This is your ultimate destination to seamlessly set up a data stack using Airbyte, dbt, and GitHub API. With this configuration, you can extract repository, commit, and pull request data from GitHub, transform it, and glean insights into code quality, collaboration patterns, and project health.
Dive in and discover how smoothly these tools integrate for an enriched data analytics experience!
## Table of Contents
- [Prerequisites](#prerequisites)
- [Architecture Layout](#architecture-layout)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors with Terraform](#3-setting-up-airbyte-connectors-with-terraform)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#5-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## Architecture Layout

## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add github_insight_stack
```
2. **Navigate to the directory**:
```bash
cd github_insight_stack
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: one for Airbyte's raw loads and one for dbt's transformed output. Note that the dbt project in this quickstart defaults to `github_raw_data` (see `models/sources.yml`) and `github_transformed_data` (see `profiles.yml`).
- If you use different names, remember to change them in those files too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your BigQuery connection. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
7. **Add Normalization to the Airbyte Connection**:
At the moment of creating this Quickstart, it's not possible to select normalization via Terraform, so you need to select that manually. In the Airbyte UI, go to the "Connections" tab, select the "GitHub to BigQuery" connection, go to the "Transformation" tab and select "Normalized tabular data" and save your changes.
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset.
## 5. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
5. **Materialize Dagster Assets**:
In the Dagster UI, you can initiate your data workflows:
Begin with the Airbyte sync to extract data from GitHub and send it to BigQuery.
Next, execute dbt transformations based on the specific models you've developed:
- code_quality
- collaboration_patterns
- project_health
In the Dagster UI, these models should be materialized, showing the transformed GitHub data according to the logic you've implemented in dbt.
When you build custom pipelines in Dagster, make sure they reflect your dbt model names and dependencies. For example, if one model depends on the output of another, ensure that your Dagster pipeline respects this order of execution.
The central idea is to adjust the orchestration setup so it maps directly to your dbt model names and the dependencies among them. Remember, the success of the orchestration will also depend on the correct setup and error-free state of the individual dbt models and Airbyte configurations.
## Next Steps
Kudos on setting up the GitHub Insight Stack! 🎉 Here’s how you can amplify your analytics capabilities:
### 1. Analyze Collaboration Patterns:
Examine the frequency, nature, and spread of commits and pull requests across repositories.
### 2. Evaluate Code Quality:
Monitor pull request acceptance rates, time to merge, and frequency of commits to assess project health and contributor efficiency.
### 3. Expand Data Horizons:
Bring in more GitHub data or integrate other platforms using Airbyte to enrich your dataset.
### 4. Improve Transformations:
Refine your dbt models to get more nuanced insights.
### 5. Scale and Monitor:
Optimize configurations for larger datasets and establish monitoring to ensure data flow remains smooth.
### 6. Engage with the Community:
Share findings, models, or new configurations with the community. Contribute to repositories and help foster collective growth.
================================================
FILE: github_insight_stack/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
.user.yml
================================================
FILE: github_insight_stack/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: github_insight_stack/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: github_insight_stack/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this config, models under the staging/ and marts/ subdirectories are
# materialized as views; models elsewhere use dbt's default materialization.
# These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
staging:
+materialized: view
marts:
+materialized: view
================================================
FILE: github_insight_stack/dbt_project/macros/.gitkeep
================================================
================================================
FILE: github_insight_stack/dbt_project/models/Readme.md
================================================
# Notes
- These models are basic starting points. You can further refine and expand on them based on the insights you
want and the intricacies of your GitHub data.
- Make sure to adjust field names if they don't match the schema of the data you've extracted from GitHub into BigQuery.
- It might also be beneficial to integrate other dbt packages or custom macros to handle more advanced transformations or aggregations as needed.
- Always remember to test your models with dbt to ensure they run correctly and produce the expected results.
================================================
FILE: github_insight_stack/dbt_project/models/sources.yml
================================================
version: 2
sources:
- name: github_raw
database: your_bigquery_project_id
schema: github_raw_data
tables:
- name: repositories
- name: commits
- name: pull_requests
================================================
FILE: github_insight_stack/dbt_project/models/test-models/code_quality.sql
================================================
-- Per-committer commit volume and average commit-message length,
-- a rough proxy for commit hygiene / code quality.
SELECT
    committer_name,
    COUNT(*) AS number_of_commits,
    AVG(LENGTH(commit_message)) AS avg_commit_message_length
FROM {{ source('github_raw', 'commits') }}
GROUP BY committer_name
================================================
FILE: github_insight_stack/dbt_project/models/test-models/collaboration_patterns.sql
================================================
-- Per-author pull-request volume and average PR-title length,
-- used to examine collaboration patterns across contributors.
SELECT
    author_name,
    COUNT(*) AS number_of_prs,
    AVG(LENGTH(pr_title)) AS avg_pr_title_length
FROM {{ source('github_raw', 'pull_requests') }}
GROUP BY author_name
================================================
FILE: github_insight_stack/dbt_project/models/test-models/project_health.sql
================================================
-- Per-committer spread: how many distinct repositories each contributor
-- touches and how many commits they make overall.
SELECT
    committer_name,
    COUNT(DISTINCT repo_name) AS repos_contributed_to,
    COUNT(*) AS number_of_commits
FROM {{ source('github_raw', 'commits') }}
GROUP BY committer_name
================================================
FILE: github_insight_stack/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
dataset: github_transformed_data
job_execution_timeout_seconds: 300
job_retries: 1
# Use an env variable to indicate your JSON key file path
keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
location: US
method: service-account
priority: interactive
# Indicate your BigQuery project ID
project: your_project_id
threads: 1
type: bigquery
target: dev
================================================
FILE: github_insight_stack/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: github_insight_stack/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: github_insight_stack/dbt_project/tests/.gitkeep
================================================
================================================
FILE: github_insight_stack/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: github_insight_stack/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:a6g5uWP/pt1/popVNlKwnTssWNfdYY4KVFPMisN/yvU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: github_insight_stack/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
resource "airbyte_source_github" "github" {
  configuration = {
    access_token = var.github_access_token // You'll need to provide the GitHub access token as a variable.
    repository   = var.github_repository   // The GitHub repository you want to pull data from.
    start_date   = "2023-01-01"            // Starting date from which data should be pulled. Modify as needed.
    source_type  = "github"
  }
  name         = "GitHub"
  workspace_id = var.workspace_id
}
// Destinations
resource "airbyte_destination_bigquery" "bigquery" {
configuration = {
dataset_id = var.dataset_id
dataset_location = "US"
destination_type = "bigquery"
project_id = var.project_id
credentials_json = var.credentials_json
loading_method = {
destination_bigquery_loading_method_standard_inserts = {
method = "Standard"
}
}
}
name = "BigQuery"
workspace_id = var.workspace_id
}
// Connections
resource "airbyte_connection" "github_to_bigquery" {
name = "GitHub to BigQuery"
source_id = airbyte_source_github.github.source_id
destination_id = airbyte_destination_bigquery.bigquery.destination_id
configurations = {
streams = [
{
name = "commits" // Modify to match the GitHub streams you want to sync.
},
{
name = "issues"
},
{
name = "pull_requests"
},
// Add or remove any other streams as per your needs.
]
}
}
================================================
FILE: github_insight_stack/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte Terraform provider.
// Docs: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.3" // Pinned: resource schemas can change between provider versions.
    }
  }
}
provider "airbyte" {
// If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
// include the actual password/username you've set up (or use the defaults below)
username = "airbyte"
password = "password"
// if running locally (Airbyte OSS), include the server url to the airbyte-api-server
server_url = "http://localhost:8006/v1"
}
================================================
FILE: github_insight_stack/infra/airbyte/variables.tf
================================================
variable "workspace_id" {
type = string
}
variable "dataset_id" {
type = string
}
variable "project_id" {
type = string
}
variable "credentials_json" {
type = string
}
================================================
FILE: github_insight_stack/orchestration/orchestration/__init__.py
================================================
================================================
FILE: github_insight_stack/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path
@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
yield from dbt.cli(["build"], context=context).stream()
# Points Dagster at a locally running Airbyte instance; the default
# docker-compose deployment exposes the web server on port 8000.
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password:
    username="airbyte",
    # NOTE(review): os.getenv returns None when AIRBYTE_PASSWORD is unset —
    # export it before launching Dagster, or authentication will fail.
    password=os.getenv("AIRBYTE_PASSWORD")
)
# Auto-discover the connections configured in the Airbyte instance and expose
# them as Dagster assets, namespaced under the "github" key prefix.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance, key_prefix="github")
================================================
FILE: github_insight_stack/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource
# Absolute path to the dbt project: three levels up from this file
# (orchestration/orchestration/ -> quickstart root), then into dbt_project/.
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
# CLI resource used below to (re)parse the project at import time if requested.
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))
# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # `dbt parse` writes manifest.json into the invocation's target path.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    # Assumes `dbt build`/`dbt parse` was run beforehand so target/manifest.json exists.
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: github_insight_stack/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource
from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules
# Single Dagster code location wiring together the dbt assets, the
# Airbyte-discovered assets, and any schedules. The "dbt" resource key
# matches the `dbt: DbtCliResource` parameter on `dbt_project_dbt_assets`.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: github_insight_stack/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
# No schedules are active by default; uncomment the block below (it uses the
# imports above) to materialize all dbt models daily at midnight.
schedules = [
    # build_schedule_from_dbt_selection(
    #     [dbt_project_dbt_assets],
    #     job_name="materialize_dbt_models",
    #     cron_schedule="0 0 * * *",
    #     dbt_select="fqn:*",
    # ),
]
================================================
FILE: github_insight_stack/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: github_insight_stack/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Runtime dependencies of the Dagster orchestration package.
INSTALL_REQUIRES = [
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dbt-core>=1.4.0",
    "dbt-bigquery",
]

# The "dev" extra adds the local Dagster web UI.
setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require={"dev": ["dagster-webserver"]},
)
================================================
FILE: github_insight_stack/setup.py
================================================
from setuptools import find_packages, setup

# Packaging metadata for the Airbyte + dbt + Dagster quickstart.
setup(
    name="airbyte-dbt-dagster",
    # Added: every other setup.py in this quickstart pins a version
    # (orchestration/setup.py uses 0.0.1); this one was missing it.
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "dbt-bigquery",
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dagster-airbyte",
    ],
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: low_latency_data_availability/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: low_latency_data_availability/README.md
================================================
# Low-Latency Data Availability Stack
Welcome to the "Low-Latency Data Availability Stack" repository! This repo provides a quickstart template for building a Low-Latency Data Availability solution that syncs data from an existing Postgres database to a BigQuery dataset using Airbyte. We will easily replicate the tables and data from the Postgres database to BigQuery with Airbyte using Change Data Capture (CDC) and Postgres Write Ahead Log (WAL). This quickstart also explores using Airbyte Postgres features to ensure near real-time data availability and access. While this template doesn't delve into specific data, its goal is to showcase how the low latency data solution can be achieved with Airbyte.
Just like other Airbyte quickstarts, this quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Low-Latency Data Availability Stack](#low-latency-data-availability-stack)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [3. Achieving Low Latency with Airbyte](#3-achieving-low-latency-with-airbyte)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add low_latency_data_availability
```
2. **Navigate to the directory**:
```bash
cd low_latency_data_availability
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres and BigQuery connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your Postgres source and BigQuery destination connectors, as well as the connection between them, set up and ready to go.
## 3. Achieving Low Latency with Airbyte
Using Airbyte for the data replication offers the following for achieving low latency:
- Throughput performance for the Postgres source connector is about 11 MB per second. This offers users the capability to sync terabytes of tables in a fast, reliable manner.
- For very large database tables, the data are read in chunks. This caters for reliability issues due to the strain on the server or network issues. These chunks are either read successively or even concurrently.
- [Checkpointing](https://docs.airbyte.com/understanding-airbyte/airbyte-protocol/#state--checkpointing). If there is a network error, or a server goes down for maintenance during a sync operation, Airbyte stores the state of the sync so that it can restart from a known point. For Postgres, these saved checkpoints are based on [CTID](https://enterprisedb.com/postgres-tutorials/what-equivalent-rowid-postgresql#:~:text=The%20ctid%20field%20is%20a,the%20location%20of%20the%20tuple.) markers. Thus, if there is an error, the read can resume from the last saved checkpoint.
- Once an initial sync is done, for subsequent incremental syncs, Airbyte can use either of 3 [options](https://docs.airbyte.com/integrations/sources/postgres#postgres-replication-methods) that depend on a reliable cursor to be able to find data that has changed. These options are;
- [CDC](https://docs.airbyte.com/integrations/sources/postgres#cdc),
- [xmin](https://docs.airbyte.com/integrations/sources/postgres#xmin), or
- a user column.
For this quickstart, we will be using the CDC option as it offers the least latency for high volume data sync. If your data is less than 500GB, you can go for the xmin option.
## Next Steps
Once you've set up and launched this initial integration, you can proceed to sync the connection to trigger the data sync. The real power of this quickstart lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Plan your Data Replication**:
Ideally, database replication should be a planned activity. Do not run a data replication job during a production peak. Data latency depends on factors such as the size of data to be moved, geographic location of the source and destination, and other parameters. Ensure you test thoroughly before deploying to production.
2. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more Postgres sources, integrate additional tools, or modify the sync schedule – the floor is yours. The granularity of the replication can also be set by selecting the correct sync mode for each stream (table). Read [sync mode](https://docs.airbyte.com/understanding-airbyte/connections/) for more details. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: low_latency_data_availability/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: low_latency_data_availability/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
resource "airbyte_source_postgres" "postgres" {
configuration = {
database = "...my_database..."
host = "...my_host..."
username = "...my_username..."
password = "...my_password..."
port = 5432
source_type = "postgres"
schemas = [
"...my_schema..."
]
ssl_mode = {
allow = {}
}
tunnel_method = {
no_tunnel = {}
}
replication_method = {
read_changes_using_write_ahead_log_cdc = {
publication = "...pub..."
replication_slot = "...slot..."
}
}
}
name = "Postgres"
workspace_id = var.workspace_id
}
// Destinations
resource "airbyte_destination_bigquery" "bigquery" {
configuration = {
dataset_id = "...my_dataset_id..."
dataset_location = "...my_dataset_location..."
destination_type = "bigquery"
project_id = "...my_project_id..."
credentials_json = "...my_credentials_json_file_path..."
loading_method = {
destination_bigquery_loading_method_standard_inserts = {
method = "Standard"
}
}
}
name = "BigQuery"
workspace_id = var.workspace_id
}
// Connections
resource "airbyte_connection" "postgres_to_bigquery" {
name = "Postgres to BigQuery [Low Latency Data]"
source_id = airbyte_source_postgres.postgres.source_id
destination_id = airbyte_destination_bigquery.bigquery.destination_id
configurations = {
streams = [
{
cursor_field = ["...",]
name = "...my_table_name_1..."
primary_key = [["...",],]
sync_mode = "incremental_deduped_history"
},
{
cursor_field = ["...",]
name = "...my_table_name_2..."
primary_key = [["...",],]
sync_mode = "incremental_deduped_history"
},
]
}
}
================================================
FILE: low_latency_data_availability/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte Terraform provider.
// Docs: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4" // Pinned: resource schemas can change between provider versions.
    }
  }
}
provider "airbyte" {
// If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
// include the actual password/username you've set up (or use the defaults below)
username = "airbyte"
password = "password"
// if running locally (Airbyte OSS), include the server url to the airbyte-api-server
server_url = "http://localhost:8006/v1"
}
================================================
FILE: low_latency_data_availability/infra/airbyte/variables.tf
================================================
variable "workspace_id" {
type = string
}
================================================
FILE: low_latency_data_availability/setup.py
================================================
from setuptools import find_packages, setup

# Minimal packaging stub for the low-latency quickstart: no runtime
# dependencies, pytest only under the "dev" extra.
setup(
    name="low-latency-data-availability",
    packages=find_packages(),
    install_requires=[],
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: mongodb_mysql_integration/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: mongodb_mysql_integration/README.md
================================================
# MongoDB MySQL integration Stack
Welcome to the "MongoDB to MySQL Stack" repository! This repo provides a quickstart template for building a MongoDB data integration solution using Airbyte. We will easily synchronize the NoSQL mongoDB data to SQL type MySQL databases with Airbyte terraform airbyte provider. Also this template involves flexibility and use cases for integrating new sources and connections.
This quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [MongoDB MySQL integration Stack](#mongodb-mysql-integration-stack)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add mongodb_mysql_integration
```
2. **Navigate to the directory**:
```bash
cd mongodb_mysql_integration
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your MongoDB and MySQL connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or add some transformation logic – the floor is yours. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: mongodb_mysql_integration/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: mongodb_mysql_integration/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
resource "airbyte_source_mongodb" "mongodb" {
configuration = {
auth_source = "admin"
database = "...my_database..."
instance_type = {
source_mongodb_mongo_db_instance_type_mongo_db_atlas = {
cluster_url = "...my_cluster_url..."
instance = "atlas"
}
}
password = "...my_password..."
source_type = "mongodb"
user = "...my_user..."
}
name = "MongoDB-Source"
secret_id = "...my_secret_id..."
workspace_id = var.workspace_id
}
// Destinations
resource "airbyte_destination_mysql" "mysql" {
configuration = {
database = "...my_database..."
destination_type = "mysql"
host = "...my_host..."
jdbc_url_params = "...my_jdbc_url_params..."
password = "...my_password..."
port = 3306
tunnel_method = {
destination_mysql_ssh_tunnel_method_no_tunnel = {
tunnel_method = "NO_TUNNEL"
}
}
username = "...my_username..."
}
name = "MySql-Destination"
workspace_id = var.workspace_id
}
// Connections
resource "airbyte_connection" "mongodb_to_mysql" {
name = "MongoDB to MySQL"
source_id = airbyte_source_mongodb.mongodb.source_id
destination_id = airbyte_destination_mysql.mysql.destination_id
configurations = {
streams = [
{
name = "...my_table_name_1..."
},
{
name = "...my_table_name_2..."
},
]
}
schedule = {
cron_expression = "...my_cron_expression..."
schedule_type = "cron"
}
}
================================================
FILE: mongodb_mysql_integration/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  // NOTE(review): credentials are hardcoded here; the sibling multisource_aggregation
  // quickstart passes them via a variable with a default — consider the same approach.
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: mongodb_mysql_integration/infra/airbyte/variables.tf
================================================
variable "workspace_id" {
type = string
}
================================================
FILE: mongodb_mysql_integration/setup.py
================================================
from setuptools import find_packages, setup

setup(
    # Name matches this quickstart's directory (mongodb_mysql_integration);
    # it was previously "postgres-data-replication", copy-pasted from the
    # Postgres quickstart's setup.py.
    name="mongodb-mysql-integration",
    version="0.0.1",
    packages=find_packages(),
    # No runtime dependencies: the Airbyte resources are provisioned with Terraform.
    install_requires=[],
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: multisource_aggregation/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: multisource_aggregation/README.md
================================================
# Multisource Database Aggregation
Welcome to the "Multisource Database Aggregation" repository! This repo provides a quickstart template for building a full data stack that aggregates data from multiple databases and data sources using Airbyte and loads the aggregated data into a preferred data warehouse. In this quickstart, we will easily extract data from Postgres and MySQL tables, load it into BigQuery, and apply necessary transformations in the BigQuery dataset using dbt. The data aggregation with Airbyte and transformations with dbt are orchestrated seamlessly with Dagster. While this template doesn't delve into specific data or transformations, its goal is to showcase the synergy of these tools.
Like other quickstarts, this is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Multisource Database Aggregation](#multisource-database-aggregation)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Dagster Pipeline DAG](#dagster-pipeline-dag)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [3. Setting Up the dbt Project](#3-setting-up-the-dbt-project)
- [4. Orchestrating with Dagster](#4-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Dagster Pipeline DAG

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add multisource_aggregation
```
2. **Navigate to the directory**:
```bash
cd multisource_aggregation
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
You'll also find three crucial Terraform modules:
- `connections`: Contains the configuration files for the Airbyte connections.
- `destination_warehouse`: Contains the configuration files for the Airbyte destination connector(s).
- `source_databases`: Contains the configuration files for the Airbyte source connector(s).
In each terraform module, you will find the following Terraform configuration files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
- `outputs.tf`: Defines exported data or metadata about your resources.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres, MySQL and BigQuery connections. You can utilize the `variables.tf` files to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 3. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd ../../dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
## 4. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd ../orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Add more Data(base) sources**:
You can add more databases or data sources from Airbyte's [source catalogue](https://docs.airbyte.com/category/sources). To do this, edit the Terraform `source_databases` module and create a new connection in the `connections` module for each source added.
2. **Create dbt Sources for Airbyte Data**:
Your raw data extracted via Airbyte can be represented as sources in dbt. Start by [creating new dbt sources](https://docs.getdbt.com/docs/build/sources) to represent this data, allowing for structured transformations down the line.
3. **Add Your dbt Transformations**:
With your dbt sources in place, you can now build upon them. Add your custom SQL transformations in dbt, ensuring that you treat the sources as an upstream dependency. This ensures that your transformations work on the most up-to-date raw data.
4. **Execute the Pipeline in Dagster**:
Navigate to the Dagster UI and click on "Materialize all". This triggers the entire pipeline, encompassing the extraction via Airbyte, transformations via dbt, and any other subsequent steps.
5. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or enhance your transformation logic – the floor is yours. With the foundation set, the sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: multisource_aggregation/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
#Desktop Services Store
.DS_Store
#User cookie
.user.yml
================================================
FILE: multisource_aggregation/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: multisource_aggregation/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: multisource_aggregation/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
example:
+materialized: view
================================================
FILE: multisource_aggregation/dbt_project/macros/.gitkeep
================================================
================================================
FILE: multisource_aggregation/dbt_project/models/example/my_first_dbt_model.sql
================================================
/*
Welcome to your first dbt model!
Did you know that you can also configure models directly within SQL files?
This will override configurations stated in dbt_project.yml
Try changing "table" to "view" below
*/
{{ config(materialized='table') }}

-- Aggregate the Postgres- and MySQL-sourced tables (both landed in BigQuery
-- by Airbyte) into a single deduplicated table.
-- BUGFIX: the original used two separate `with` keywords, which is invalid
-- SQL; multiple CTEs must be comma-separated under a single `with`.
with pg_table as (
    select * from {{ source('bigquery', 'sample_table') }}
),

mysql_table as (
    select * from {{ source('bigquery', 'test_table') }}
)

select *
from pg_table
union
select *
from mysql_table

/*
    Uncomment the line below to remove records with null `id` values
*/
-- where id is not null
================================================
FILE: multisource_aggregation/dbt_project/models/example/my_second_dbt_model.sql
================================================
-- Use the `ref` function to select from other models
select *
from {{ ref('my_first_dbt_model') }}
where id = 1
================================================
FILE: multisource_aggregation/dbt_project/models/example/schema.yml
================================================
version: 2
models:
- name: my_first_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
- name: my_second_dbt_model
description: "A starter dbt model"
columns:
- name: id
description: "The primary key for this table"
tests:
- unique
- not_null
================================================
FILE: multisource_aggregation/dbt_project/models/sources.yml
================================================
version: 2
sources:
- name: bigquery
tables:
- name: sample_table
meta:
dagster:
asset_key: ["pg_sample_table"] # This metadata specifies the corresponding Dagster asset for this dbt source.
- name: test_table
meta:
dagster:
asset_key: ["mysql_test_table"] # This metadata specifies the corresponding Dagster asset for this dbt source.
================================================
FILE: multisource_aggregation/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
dataset: my_dataset
job_execution_timeout_seconds: 300
job_retries: 1
keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
location: my_dataset_location
method: service-account
priority: interactive
project: my_project_id
threads: 1
type: bigquery
target: dev
================================================
FILE: multisource_aggregation/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: multisource_aggregation/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: multisource_aggregation/dbt_project/tests/.gitkeep
================================================
================================================
FILE: multisource_aggregation/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: multisource_aggregation/infra/airbyte/connections/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Connections
// Connection syncing the Postgres source into BigQuery.
// Streams are placeholders; replace table names with real ones.
resource "airbyte_connection" "postgres_to_bigquery" {
  name           = "Postgres to BigQuery"
  source_id      = var.postgres_id
  destination_id = var.bigquery_id
  // Destination tables get this prefix so both sources can land in one dataset
  // (matches the dbt source asset_key "pg_sample_table").
  prefix = "pg_"
  schedule = {
    // Manual schedule: syncs are triggered by the orchestrator (Dagster), not Airbyte.
    schedule_type = "manual"
  }
  configurations = {
    streams = [
      {
        cursor_field = ["...",]
        name         = "...my_table_name_1..."
        primary_key  = [["...",],]
        sync_mode    = "full_refresh_append"
      },
      {
        cursor_field = ["...",]
        name         = "...my_table_name_2..."
        primary_key  = [["...",],]
        sync_mode    = "full_refresh_append"
      },
    ]
  }
}

// Connection syncing the MySQL source into BigQuery.
resource "airbyte_connection" "mysql_to_bigquery" {
  name           = "MySQL to BigQuery"
  source_id      = var.mysql_id
  destination_id = var.bigquery_id
  // Matches the dbt source asset_key "mysql_test_table".
  prefix = "mysql_"
  schedule = {
    schedule_type = "manual"
  }
  configurations = {
    streams = [
      {
        cursor_field = ["...",]
        name         = "...my_table_name_1..."
        primary_key  = [["...",],]
        sync_mode    = "full_refresh_append"
      },
      {
        cursor_field = ["...",]
        name         = "...my_table_name_2..."
        primary_key  = [["...",],]
        sync_mode    = "full_refresh_append"
      },
    ]
  }
}
================================================
FILE: multisource_aggregation/infra/airbyte/connections/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
required_providers {
airbyte = {
source = "airbytehq/airbyte"
version = "0.3.3"
}
}
}
provider "airbyte" {
// If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
// include the actual password/username you've set up (or use the defaults below)
username = "airbyte"
password = var.airbyte_password
// if running locally (Airbyte OSS), include the server url to the airbyte-api-server
server_url = "http://localhost:8006/v1"
}
================================================
FILE: multisource_aggregation/infra/airbyte/connections/variables.tf
================================================
variable "mysql_id" {
type = string
}
variable "postgres_id" {
type = string
}
variable "bigquery_id" {
type = string
}
variable "airbyte_password" {
type = string
default = "password"
}
================================================
FILE: multisource_aggregation/infra/airbyte/destination_warehouse/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Destinations
// BigQuery destination connector.
// All "...my_*..." values are placeholders to replace before `terraform apply`.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = "...my_dataset_id..."
    dataset_location = "...my_dataset_location..."
    destination_type = "bigquery"
    project_id       = "...my_project_id..."
    // Service-account key; despite the name this takes the JSON content/path placeholder.
    credentials_json = "...my_credentials_json_file_path..."
    loading_method = {
      // Standard inserts: simplest setup; GCS staging is the alternative for large volumes.
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}
================================================
FILE: multisource_aggregation/infra/airbyte/destination_warehouse/outputs.tf
================================================
output "bigquery_id" {
value = airbyte_destination_bigquery.bigquery.destination_id
}
================================================
FILE: multisource_aggregation/infra/airbyte/destination_warehouse/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
required_providers {
airbyte = {
source = "airbytehq/airbyte"
version = "0.3.3"
}
}
}
provider "airbyte" {
// If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
// include the actual password/username you've set up (or use the defaults below)
username = "airbyte"
password = var.airbyte_password
// if running locally (Airbyte OSS), include the server url to the airbyte-api-server
server_url = "http://localhost:8006/v1"
}
================================================
FILE: multisource_aggregation/infra/airbyte/destination_warehouse/variables.tf
================================================
variable "workspace_id" {
type = string
}
variable "airbyte_password" {
type = string
default = "password"
}
================================================
FILE: multisource_aggregation/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Root module wiring: sources (Postgres + MySQL) and the BigQuery destination
// are created first; connections consume their output ids.
module "sources" {
  source       = "./source_databases"
  workspace_id = "...my_workspace_id..."
}

module "destination" {
  source       = "./destination_warehouse"
  workspace_id = "...my_workspace_id..."
}

module "connections" {
  source = "./connections"
  // Ids exported from the modules above (see their outputs.tf files).
  mysql_id    = module.sources.mysql_id
  postgres_id = module.sources.postgres_id
  bigquery_id = module.destination.bigquery_id
}
================================================
FILE: multisource_aggregation/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
required_providers {
airbyte = {
source = "airbytehq/airbyte"
version = "0.3.3"
}
}
}
provider "airbyte" {
// If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
// include the actual password/username you've set up (or use the defaults below)
username = "airbyte"
password = var.airbyte_password
// if running locally (Airbyte OSS), include the server url to the airbyte-api-server
server_url = "http://localhost:8006/v1"
}
================================================
FILE: multisource_aggregation/infra/airbyte/source_databases/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// MySQL source connector.
// All "...my_*..." values are placeholders to replace before `terraform apply`.
resource "airbyte_source_mysql" "mysql" {
  configuration = {
    database        = "...my_database..."
    host            = "...my_host..."
    jdbc_url_params = "...my_jdbc_url_params..."
    password        = "...my_password..."
    port            = 3306
    source_type     = "mysql"
    username        = "...my_username..."
    replication_method = {
      // STANDARD = cursor-based incremental scans (no binlog/CDC required).
      source_mysql_update_method_scan_changes_with_user_defined_cursor = {
        method = "STANDARD"
      }
    }
    ssl_mode = {
      // "preferred": use TLS when the server supports it, fall back otherwise.
      source_mysql_ssl_modes_preferred = {
        mode = "preferred"
      }
    }
    tunnel_method = {
      source_mysql_ssh_tunnel_method_no_tunnel = {
        tunnel_method = "NO_TUNNEL"
      }
    }
  }
  name = "MySQL"
  // Optional reference to an Airbyte-managed secret holding the credentials.
  secret_id    = "...my_secret_id..."
  workspace_id = var.workspace_id
}

// Postgres source connector.
resource "airbyte_source_postgres" "postgres" {
  configuration = {
    database    = "...my_database..."
    host        = "...my_host..."
    username    = "...my_username..."
    password    = "...my_password..."
    port        = 5432
    source_type = "postgres"
    // Schemas to discover/replicate tables from.
    schemas = [
      "...my_schema..."
    ]
    ssl_mode = {
      source_postgres_ssl_modes_allow = {
        mode = "allow"
      }
    }
    tunnel_method = {
      source_postgres_ssh_tunnel_method_no_tunnel = {
        tunnel_method = "NO_TUNNEL"
      }
    }
    replication_method = {
      // Standard (non-CDC) replication; no logical replication slot needed.
      source_postgres_replication_method_standard = {
        method = "Standard"
      }
    }
  }
  name         = "Postgres"
  workspace_id = var.workspace_id
}
================================================
FILE: multisource_aggregation/infra/airbyte/source_databases/outputs.tf
================================================
output "postgres_id" {
value = airbyte_source_postgres.postgres.source_id
}
output "mysql_id" {
value = airbyte_source_mysql.mysql.source_id
}
================================================
FILE: multisource_aggregation/infra/airbyte/source_databases/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
required_providers {
airbyte = {
source = "airbytehq/airbyte"
version = "0.3.3"
}
}
}
provider "airbyte" {
// If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
// include the actual password/username you've set up (or use the defaults below)
username = "airbyte"
password = var.airbyte_password
// if running locally (Airbyte OSS), include the server url to the airbyte-api-server
server_url = "http://localhost:8006/v1"
}
================================================
FILE: multisource_aggregation/infra/airbyte/source_databases/variables.tf
================================================
variable "workspace_id" {
type = string
}
variable "airbyte_password" {
type = string
default = "password"
}
================================================
FILE: multisource_aggregation/infra/airbyte/variables.tf
================================================
variable "workspace_id" {
type = string
}
variable "airbyte_password" {
type = string
default = "password"
}
================================================
FILE: multisource_aggregation/orchestration/orchestration/__init__.py
================================================
================================================
FILE: multisource_aggregation/orchestration/orchestration/assets.py
================================================
import os

from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance

from .constants import dbt_manifest_path


@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize every dbt model in the project by running `dbt build`."""
    yield from dbt.cli(["build"], context=context).stream()


# Local Airbyte OSS instance; the sources/destinations it hosts are
# provisioned by the Terraform under infra/airbyte.
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password.
    username="airbyte",
    # FIX: default to "password" instead of None when AIRBYTE_PASSWORD is
    # unset — consistent with the Terraform variable default
    # (infra/airbyte/*/variables.tf) and the README instructions.
    password=os.getenv("AIRBYTE_PASSWORD", "password"),
)

# One Dagster asset per stream of each Airbyte connection on the instance.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance)
================================================
FILE: multisource_aggregation/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path

from dagster_dbt import DbtCliResource

# Absolute path to the dbt project that lives alongside this orchestration
# package (three levels up from this file: ../../../dbt_project).
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # Run `dbt parse` now and point at the manifest.json it generates.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    # Assume `dbt parse`/`dbt build` has already produced target/manifest.json.
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: multisource_aggregation/orchestration/orchestration/definitions.py
================================================
import os

from dagster import Definitions
from dagster_dbt import DbtCliResource

from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# Top-level Dagster code location: exposes the Airbyte ingestion assets and
# the dbt transformation assets, wired to a dbt CLI resource pointed at the
# sibling dbt project.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: multisource_aggregation/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection

from .assets import dbt_project_dbt_assets

# Empty by default: runs are triggered manually from the Dagster UI
# ("Materialize all"). Uncomment the entry below for a daily midnight run.
schedules = [
    # build_schedule_from_dbt_selection(
    #     [dbt_project_dbt_assets],
    #     job_name="materialize_dbt_models",
    #     cron_schedule="0 0 * * *",
    #     dbt_select="fqn:*",
    # ),
]
================================================
FILE: multisource_aggregation/orchestration/pyproject.toml
================================================
# Standard PEP 517 build configuration for the orchestration package.
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"

# Tells `dagster dev` which module exposes the Definitions object
# and what name to show for this code location in the Dagster UI.
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: multisource_aggregation/orchestration/setup.py
================================================
"""Package configuration for the Dagster orchestration project."""
from setuptools import find_packages, setup

# Runtime dependencies: the Dagster orchestrator plus the dbt/BigQuery toolchain.
INSTALL_REQUIRES = [
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dbt-core>=1.4.0",
    "dbt-bigquery",
]

# Extras installed for local development via `pip install -e ".[dev]"`.
DEV_REQUIRES = [
    "dagster-webserver",
]

setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require={"dev": DEV_REQUIRES},
)
================================================
FILE: multisource_aggregation/setup.py
================================================
"""Package configuration for the multi-source database aggregation quickstart."""
from setuptools import find_packages, setup

# Runtime dependencies: dbt on BigQuery orchestrated by Dagster,
# with the Airbyte integration for ingestion assets.
INSTALL_REQUIRES = [
    "dbt-bigquery",
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dagster-airbyte",
]

setup(
    name="multisource-database-aggregation",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    # Extras installed for local development via `pip install -e ".[dev]"`.
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: mysql_to_postgres_incremental_stack/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: mysql_to_postgres_incremental_stack/README.md
================================================
# Mysql to Postgres Incremental Stack
Welcome to the "Mysql to Postgres Incremental Stack" repository! This repo provides a quickstart template for building a one-off database migration solution from an existing MySQL database to a Postgres database with incremental sync using Airbyte. We will easily migrate the tables and data from the MySQL database to the Postgres database with Airbyte using Change Data Capture (CDC). While this template doesn't delve into specific data, its goal is to showcase how the database migration solution can be achieved with Airbyte.
Just like other Airbyte quickstarts, this quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Mysql to Postgres Incremental Stack](#mysql-to-postgres-incremental-stack)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add mysql_to_postgres_incremental_stack
```
2. **Navigate to the directory**:
```bash
cd mysql_to_postgres_incremental_stack
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres and MySQL connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your MySQL source and Postgres destination connectors, as well as the connection between them, set up and ready to go.
## Next Steps
Once you've set up and launched this initial integration, you can proceed to sync the connection to trigger a one-off migration. The real power of this quickstart lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more MySQL sources, migrate to more than one Postgres databases, integrate additional tools, or modify the sync schedule – the floor is yours. The granularity of the migration can also be set by selecting the correct sync mode for each stream (table). Read [sync mode](https://docs.airbyte.com/understanding-airbyte/connections/) for more details. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: mysql_to_postgres_incremental_stack/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: mysql_to_postgres_incremental_stack/infra/airbyte/main.tf
================================================
// Source: MySQL database read via Change Data Capture (binlog), so incremental
// syncs only ship rows that changed since the last run.
resource "airbyte_source_mysql" "my_source_mysql" {
  configuration = {
    database = "...my_database..."
    host     = "...my_host..."
    port     = 3306 // default MySQL port
    replication_method = {
      source_mysql_update_method_read_changes_using_binary_log_cdc_ = {
        // Seconds Airbyte waits for the initial binlog snapshot before timing out.
        initial_waiting_seconds = 10
        method                  = "CDC"
        server_time_zone        = "...my_server_time_zone..."
      }
    }
    source_type = "mysql"
    tunnel_method = {
      // Direct connection; swap for an SSH tunnel block if the DB is not reachable.
      source_mysql_ssh_tunnel_method_no_tunnel = {
        tunnel_method = "NO_TUNNEL"
      }
    }
    username = "...my_username..."
  }
  name         = "MySQL"
  workspace_id = var.workspace_id
}

// Destination: Postgres database that receives the migrated tables.
resource "airbyte_destination_postgres" "my_destination_postgres" {
  configuration = {
    database         = "...my_database..."
    destination_type = "postgres"
    host             = "...my_host..."
    password         = "...my_password..."
    port             = 5432 // default Postgres port
    schema           = "public"
    ssl              = true
    ssl_mode = {
      // "allow": try SSL first, fall back to plaintext if unavailable.
      allow = {}
    }
    tunnel_method = {
      no_tunnel = {}
    }
    username = "...my_username..."
  }
  name         = "Postgres"
  workspace_id = var.workspace_id
}

// Connection wiring the MySQL source to the Postgres destination.
// Configure one stream entry per table you want to migrate.
resource "airbyte_connection" "mysql_to_postgres" {
  name           = "Mysql to Postgres"
  source_id      = airbyte_source_mysql.my_source_mysql.source_id
  destination_id = airbyte_destination_postgres.my_destination_postgres.destination_id
  configurations = {
    streams = [{
      cursor_field = ["...", ] // column used to track incremental progress
      name         = "...my_table_name_1..."
      primary_key  = [["...", ], ]
      // incremental_append: only new/changed rows are appended each sync.
      sync_mode = "incremental_append"
    }]
  }
}
================================================
FILE: mysql_to_postgres_incremental_stack/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source = "airbytehq/airbyte"
      // Pinned so `terraform init` is reproducible across machines.
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: mysql_to_postgres_incremental_stack/infra/airbyte/variables.tf
================================================
variable "workspace_id" {
type = string
}
================================================
FILE: mysql_to_postgres_incremental_stack/setup.py
================================================
"""Package configuration for the MySQL → Postgres incremental sync quickstart."""
from setuptools import find_packages, setup

setup(
    # Fixed: was "postgres-to-mysql-migration", which reversed the direction of
    # this quickstart (the Terraform config uses a MySQL source and a Postgres
    # destination, matching the repo name mysql_to_postgres_incremental_stack).
    name="mysql-to-postgres-migration",
    packages=find_packages(),
    # No runtime dependencies: Airbyte and Terraform do the heavy lifting.
    install_requires=[],
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: outdoor_activity_analytics_recreation/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: outdoor_activity_analytics_recreation/README.md
================================================
# Outdoor Activity Analytics Stack With Recreation Api, Airbyte, Dbt, Dagster and BigQuery
Welcome to the "Outdoor Activity Analytics Stack" repository! ✨ This is your go-to place to easily set up a data stack using Recreation Api, Airbyte, Dbt, BigQuery, and Dagster. With this setup, you can pull Recreation Api data, extract it using Airbyte, put it into BigQuery, and play around with it using dbt and Dagster.
This Quickstart is all about making things easy, getting you started quickly and showing you how smoothly all these tools can work together!
Below is a visual representation of how data flows through our integrated tools in this Quickstart. This comes from Dagster's global asset lineage view:
.svg>)
## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors with Terraform](#3-setting-up-airbyte-connectors-with-terraform)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#5-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add outdoor_activity_analytics_recreation
```
2. **Navigate to the directory**:
```bash
cd outdoor_activity_analytics_recreation
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your BigQuery connection. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset.
6. **Visualise the Data(optional)**:
This is totally an optional step to visualise the data. We will be using python and matplotlib you can use any of your choice. First we need to install the necessary dependencies and we can do this by the following command.
```bash
pip install google-cloud-bigquery matplotlib
```
Now create a folder named "analyses" under the dbt_project directory. Make sure to name the folder exactly the same as you've mentioned in the `dbt_project.yml` file, otherwise it will throw an error. Next, create a Python file under the "analyses" folder with an appropriate name like `most_common_activities_in_recareas_analysis.py`, and write your Python script for the analysis. Make sure to set your BigQuery service account JSON file path as an environment variable and use it to authenticate with BigQuery.
Now after you are done writing your python script go to "analyses" folder.
```bash
cd analyses
```
Now run the following command to run the python file. Make sure to replace `most_common_activities_in_recareas_analysis.py` with your actual file name.
```bash
python most_common_activities_in_recareas_analysis.py
```
You should then see a window displaying a beautiful chart.
## 5. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
5. **Materialize Dagster Assets**:
In the Dagster UI, click on "Materialize all". This should trigger the full pipeline. First the Airbyte sync to extract data from the Recreation API and load it into BigQuery, and then dbt to transform the raw data, materializing the `staging` and `marts` models.
## Next Steps
Congratulations on deploying and running the Outdoor Activity Analytics Quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Dive into the datasets in BigQuery, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Automate and Monitor Your Pipelines**
- Explore more advanced Dagster configurations and setups to automate your pipelines further and set up monitoring and alerting to be informed of any issues immediately.
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
.user.yml
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/analyses/campsite_availability_analysis.py
================================================
"""Plot the number of campsites created per day from the transformed BigQuery data."""
from google.cloud import bigquery
import os
from matplotlib import pyplot as plt

# Service-account key used to authenticate with BigQuery (see README step 2.4).
service_account_key_path = os.environ.get('DBT_BIGQUERY_KEYFILE_PATH')
if not service_account_key_path:
    # Fail fast with a clear message instead of a confusing TypeError from
    # from_service_account_json(None).
    raise RuntimeError(
        "DBT_BIGQUERY_KEYFILE_PATH is not set. Export it with the path to your "
        "BigQuery service-account JSON key file before running this analysis."
    )
client = bigquery.Client.from_service_account_json(service_account_key_path)

# Daily campsite counts from the dbt staging model.
query = """
SELECT
DATE(CreatedDate) AS date,
COUNT(*) AS campsite_count
FROM
transformed_data.stg_campsites
GROUP BY
date
ORDER BY
date
"""
query_job = client.query(query)
results = list(query_job.result())

# Unpack rows into parallel lists for plotting.
dates = [row.date for row in results]
campsite_counts = [row.campsite_count for row in results]

plt.figure(figsize=(10, 6))
plt.plot(dates, campsite_counts, marker='o')
plt.xlabel('Date')
plt.ylabel('Campsite Count')
plt.title('Campsite Availability Over Time')
plt.show()
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/analyses/campsite_type_analysis.py
================================================
"""Plot the distribution of campsites by campsite type from BigQuery."""
from google.cloud import bigquery
import os
from matplotlib import pyplot as plt

# Service-account key used to authenticate with BigQuery (see README step 2.4).
service_account_key_path = os.environ.get('DBT_BIGQUERY_KEYFILE_PATH')
if not service_account_key_path:
    # Fail fast with a clear message instead of a confusing TypeError from
    # from_service_account_json(None).
    raise RuntimeError(
        "DBT_BIGQUERY_KEYFILE_PATH is not set. Export it with the path to your "
        "BigQuery service-account JSON key file before running this analysis."
    )
client = bigquery.Client.from_service_account_json(service_account_key_path)

# Campsite counts grouped by type, from the dbt staging model.
query = """
SELECT
CampsiteType,
COUNT(*) AS campsite_count
FROM
transformed_data.stg_campsites
GROUP BY
CampsiteType
"""
query_job = client.query(query)
results = list(query_job.result())

# Unpack rows into parallel lists for plotting.
campsite_types = [row.CampsiteType for row in results]
campsite_counts = [row.campsite_count for row in results]

plt.figure(figsize=(10, 6))
plt.barh(campsite_types, campsite_counts)
plt.xlabel('Campsite Count')
plt.ylabel('Campsite Type')
plt.title('Campsite Count by Campsite Type')
plt.show()
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/analyses/count_recareas_by_activity_analysis.py
================================================
"""Plot how many recreational areas offer each activity, from BigQuery."""
from google.cloud import bigquery
import os
from matplotlib import pyplot as plt

# Service-account key used to authenticate with BigQuery (see README step 2.4).
service_account_key_path = os.environ.get('DBT_BIGQUERY_KEYFILE_PATH')
if not service_account_key_path:
    # Fail fast with a clear message instead of a confusing TypeError from
    # from_service_account_json(None).
    raise RuntimeError(
        "DBT_BIGQUERY_KEYFILE_PATH is not set. Export it with the path to your "
        "BigQuery service-account JSON key file before running this analysis."
    )
client = bigquery.Client.from_service_account_json(service_account_key_path)

# Join activities to recreation areas via the ActivityID embedded in the
# ACTIVITY JSON column, then count areas per activity.
query = """
SELECT
a.ActivityName,
COUNT(r.RecAreaID) AS rec_area_count
FROM
transformed_data.stg_activities AS a
LEFT JOIN
transformed_data.stg_recreationareas AS r
ON
a.ActivityID = CAST(JSON_EXTRACT_SCALAR(r.ACTIVITY, '$.ActivityID') AS INT64)
GROUP BY
a.ActivityName
"""
query_job = client.query(query)
results = list(query_job.result())

# Unpack rows into parallel lists for plotting.
activity_names = [row.ActivityName for row in results]
rec_area_counts = [row.rec_area_count for row in results]

plt.figure(figsize=(10, 6))
plt.barh(activity_names, rec_area_counts)
plt.xlabel('Recreational Area Count')
plt.ylabel('Activity Name')
plt.title('Recreational Area Count by Activity')
plt.show()
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/analyses/most_common_activities_in_recareas_analysis.py
================================================
"""Chart the most common activity per recreation area, queried from BigQuery."""
import os

from google.cloud import bigquery
from matplotlib import pyplot as plt

# Fail fast with a clear message when the key file path is not configured;
# bigquery.Client.from_service_account_json(None) raises an opaque TypeError.
service_account_key_path = os.environ.get('DBT_BIGQUERY_KEYFILE_PATH')
if not service_account_key_path:
    raise RuntimeError(
        "DBT_BIGQUERY_KEYFILE_PATH environment variable is not set; "
        "point it at your BigQuery service account JSON key file."
    )

client = bigquery.Client.from_service_account_json(service_account_key_path)

# Rank activities per rec area by frequency; rn = 1 keeps the most common one.
query = """
WITH ActivityCounts AS (
SELECT
ra.RecAreaName,
a.ActivityName,
COUNT(*) AS activity_count
FROM
transformed_data.stg_recreationareas AS ra
LEFT JOIN
transformed_data.stg_activities AS a
ON
CAST(ra.RecAreaID AS INT64) = a.ActivityParentID
GROUP BY
ra.RecAreaName, a.ActivityName
)
SELECT
RecAreaName,
ActivityName,
activity_count
FROM (
SELECT
RecAreaName,
ActivityName,
activity_count,
ROW_NUMBER() OVER (PARTITION BY RecAreaName ORDER BY activity_count DESC) AS rn
FROM ActivityCounts
)
WHERE rn = 1
"""
query_job = client.query(query)
# Get the results
results = list(query_job.result())
# Plot the data
rec_areas = [row.RecAreaName for row in results]
common_activities = [row.ActivityName for row in results]
activity_counts = [row.activity_count for row in results]

plt.figure(figsize=(10, 6))
# Bug fix: plt.barh() requires numeric bar widths, but the original passed the
# activity *names* (strings), which raises a TypeError. Plot the counts as the
# widths and annotate each bar with the activity name instead.
bars = plt.barh(rec_areas, activity_counts)
for bar, activity in zip(bars, common_activities):
    plt.text(bar.get_width(), bar.get_y() + bar.get_height() / 2,
             f' {activity}', va='center')
plt.xlabel('Occurrences of Most Common Activity')
plt.ylabel('RecArea Name')
plt.title('Most Common Activity in RecAreas')
plt.show()
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + applies to all files under the matching models/ subdirectory
staging:
+materialized: view
marts:
+materialized: view
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/models/marts/campsite_availability_over_time.sql
================================================
-- Daily count of campsites by creation date.
-- Fix: use ref() instead of the hard-coded `transformed_data.stg_campsites`
-- relation so dbt tracks lineage and resolves the correct schema per target
-- (consistent with campsite_type_counts.sql).
SELECT
    DATE(CreatedDate) AS date,
    COUNT(*) AS campsite_count
FROM
    {{ ref('stg_campsites') }}
GROUP BY
    date
ORDER BY
    date
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/models/marts/campsite_type_counts.sql
================================================
-- Number of campsites per campsite type, one row per CampsiteType.
SELECT
    CampsiteType,
    COUNT(*) AS campsite_count
FROM
    {{ ref('stg_campsites') }}
GROUP BY
    CampsiteType
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/models/marts/count_recarea_by_activity_analysis.sql
================================================
-- Number of recreation areas offering each activity. The recreationareas
-- ACTIVITY column holds JSON, so the activity id is extracted with
-- JSON_EXTRACT_SCALAR before joining.
-- Fix: use ref() instead of hard-coded `transformed_data.*` relations so dbt
-- tracks lineage and resolves the correct schema per target.
SELECT
    a.ActivityName,
    COUNT(r.RecAreaID) AS rec_area_count
FROM
    {{ ref('stg_activities') }} AS a
LEFT JOIN
    {{ ref('stg_recreationareas') }} AS r
ON
    a.ActivityID = CAST(JSON_EXTRACT_SCALAR(r.ACTIVITY, '$.ActivityID') AS INT64)
GROUP BY
    a.ActivityName
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/models/marts/most_common_activities_in_recareas.sql
================================================
-- For each recreation area, select its single most frequent activity.
-- Fix: use ref() instead of hard-coded `transformed_data.*` relations so dbt
-- tracks lineage and resolves the correct schema per target.
WITH ActivityCounts AS (
    SELECT
        ra.RecAreaName,
        a.ActivityName,
        COUNT(*) AS activity_count
    FROM
        {{ ref('stg_recreationareas') }} AS ra
    LEFT JOIN
        {{ ref('stg_activities') }} AS a
    ON
        CAST(ra.RecAreaID AS INT64) = a.ActivityParentID
    GROUP BY
        ra.RecAreaName, a.ActivityName
)
SELECT
    RecAreaName,
    ActivityName,
    activity_count
FROM (
    SELECT
        RecAreaName,
        ActivityName,
        activity_count,
        -- Rank activities per area by frequency; rn = 1 is the most common.
        ROW_NUMBER() OVER (PARTITION BY RecAreaName ORDER BY activity_count DESC) AS rn
    FROM ActivityCounts
)
WHERE rn = 1
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/models/sources/recreation_source.yml
================================================
version: 2
sources:
- name: recreation
# Use your BigQuery project ID
database: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
# Use your BigQuery dataset name
schema: recreation_airbyte
tables:
- name: recreationareas
description: "Recreation areas data loaded by the Recreation connector."
columns:
- name: RecAreaID
description: "Unique identifier for the recreationareas."
- name: ACTIVITY
- name: Enabled
- name: EVENT
- name: FACILITY
- name: GEOJSON
- name: Keywords
- name: LastUpdatedDate
- name: LINK
- name: MEDIA
- name: ORGANIZATION
- name: OrgRecAreaID
- name: ParentOrgID
- name: RECAREAADDRESS
- name: RecAreaDescription
- name: RecAreaDirections
- name: RecAreaEmail
- name: RecAreaFeeDescription
- name: RecAreaLatitude
- name: RecAreaLongitude
- name: RecAreaMapURL
- name: RecAreaName
- name: RecAreaPhone
- name: RecAreaReservationURL
- name: Reservable
- name: StayLimit
- name: activities
description: "Activities data loaded by the Recreation connector."
columns:
- name: ActivityID
description: "Unique identifier for the activities."
- name: ActivityLevel
- name: ActivityName
- name: ActivityParentID
- name: campsites
description: "Campsites data loaded by the Recreation connector."
columns:
- name: CampsiteID
description: "Unique identifier for the campsites."
- name: ATTRIBUTES
- name: CampsiteAccessible
- name: CampsiteLatitude
- name: CampsiteLongitude
- name: CampsiteName
- name: CampsiteType
- name: CreatedDate
- name: ENTITYMEDIA
- name: FacilityID
- name: LastUpdatedDate
- name: Loop
- name: PERMITTEDEQUIPMENT
- name: TypeOfUse
- name: facilities
description: "Facilities data loaded by the Recreation connector."
columns:
- name: FacilityID
description: "Unique identifier for the facilities."
- name: ACTIVITY
- name: CAMPSITE
- name: Enabled
- name: EVENT
- name: FacilityAdaAccess
- name: FACILITYADDRESS
- name: FacilityDescription
- name: FacilityDirections
- name: FacilityEmail
- name: FacilityLatitude
- name: FacilityLongitude
- name: FacilityMapURL
- name: FacilityName
- name: FacilityPhone
- name: FacilityReservationURL
- name: FacilityTypeDescription
- name: FacilityUseFeeDescription
- name: GEOJSON
- name: Keywords
- name: LastUpdatedDate
- name: LegacyFacilityID
- name: LINK
- name: MEDIA
- name: ORGANIZATION
- name: OrgFacilityID
- name: ParentOrgID
- name: ParentRecAreaID
- name: PERMITENTRANCE
- name: RECAREA
- name: Reservable
- name: StayLimit
- name: TOUR
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/models/staging/stg_activities.sql
================================================
-- Staging passthrough: exposes the raw `activities` stream (loaded by Airbyte
-- into the `recreation` source dataset) for downstream mart models.
select
    *
from {{ source('recreation', 'activities') }}
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/models/staging/stg_campsites.sql
================================================
-- Staging passthrough: exposes the raw `campsites` stream (loaded by Airbyte
-- into the `recreation` source dataset) for downstream mart models.
select
    *
from {{ source('recreation', 'campsites') }}
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/models/staging/stg_facilities.sql
================================================
-- Staging passthrough: exposes the raw `facilities` stream (loaded by Airbyte
-- into the `recreation` source dataset) for downstream mart models.
select
    *
from {{ source('recreation', 'facilities') }}
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/models/staging/stg_recreationareas.sql
================================================
-- Staging passthrough: exposes the raw `recreationareas` stream (loaded by
-- Airbyte into the `recreation` source dataset) for downstream mart models.
select
    *
from {{ source('recreation', 'recreationareas') }}
================================================
FILE: outdoor_activity_analytics_recreation/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
dataset: transformed_data
job_execution_timeout_seconds: 300
job_retries: 1
# Use an env variable to indicate your JSON key file path
keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
location: US
method: service-account
priority: interactive
# Indicate your BigQuery project ID
project: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
threads: 1
type: bigquery
target: dev
================================================
FILE: outdoor_activity_analytics_recreation/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: outdoor_activity_analytics_recreation/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:0LmuAc5LvlMuOUPtNEaCAh9FHrV/C877bDJhm9Lz8MU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: outdoor_activity_analytics_recreation/infra/airbyte/main.tf
================================================
// Source
// Recreation.gov connector; `api_key` is supplied via variables.tf.
resource "airbyte_source_recreation" "my_source_recreation" {
  configuration = {
    apikey      = var.api_key
    source_type = "recreation"
  }
  name         = "Recreation"
  workspace_id = var.workspace_id
}

// Destinations
// BigQuery destination using standard (batch) inserts rather than GCS staging.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.dataset_id
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    credentials_json = var.credentials_json
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}

// Connections
// Syncs the four Recreation streams consumed by the dbt staging models.
resource "airbyte_connection" "recreation_bigquery" {
  name           = "Recreation to BigQuery"
  source_id      = airbyte_source_recreation.my_source_recreation.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [{
      name = "recreationareas"
      },
      {
        name = "facilities"
      },
      {
        name = "activities"
      },
      {
        name = "campsites"
    }]
  }
}
================================================
FILE: outdoor_activity_analytics_recreation/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.3"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  // NOTE(review): these are the Airbyte OSS defaults hard-coded for local use;
  // move real credentials into variables or environment configuration.
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1/"
}
================================================
FILE: outdoor_activity_analytics_recreation/infra/airbyte/variables.tf
================================================
// Recreation.gov API key used by the Airbyte source connector.
variable "api_key" {
  type = string
}

// Airbyte workspace that will own the source, destination, and connection.
variable "workspace_id" {
  type = string
}

// BigQuery dataset that Airbyte writes raw streams into.
variable "dataset_id" {
  type = string
}

// GCP project hosting the BigQuery dataset.
variable "project_id" {
  type = string
}

// Contents of the GCP service account JSON key used by the destination.
variable "credentials_json" {
  type = string
}
================================================
FILE: outdoor_activity_analytics_recreation/orchestration/orchestration/__init__.py
================================================
================================================
FILE: outdoor_activity_analytics_recreation/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path


# One Dagster asset per dbt model/seed/snapshot found in the project manifest.
@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Run `dbt build` and stream each result back to Dagster as it completes."""
    yield from dbt.cli(["build"], context=context).stream()


# Connection details for the locally running Airbyte OSS instance.
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password:
    username="airbyte",
    password="password"
)
# Load one asset per stream of every connection defined in the Airbyte instance.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance,)
================================================
FILE: outdoor_activity_analytics_recreation/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource

# Absolute path to the dbt project that sits three levels above this module.
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # `dbt parse` regenerates the manifest; wait() blocks until it finishes.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: outdoor_activity_analytics_recreation/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource
from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# Top-level Dagster code-location definitions: the dbt and Airbyte assets,
# any schedules, and the dbt CLI resource pointed at the local dbt project.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: outdoor_activity_analytics_recreation/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
schedules = [
# build_schedule_from_dbt_selection(
# [dbt_project_dbt_assets],
# job_name="materialize_dbt_models",
# cron_schedule="0 0 * * *",
# dbt_select="fqn:*",
# ),
]
================================================
FILE: outdoor_activity_analytics_recreation/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: outdoor_activity_analytics_recreation/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Packaging for the Dagster orchestration code location.
setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        # Dagster core plus the dbt integration used by the asset definitions.
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        # dbt with the BigQuery adapter for the bundled dbt project.
        "dbt-core>=1.4.0",
        "dbt-bigquery",
    ],
    extras_require={
        # `pip install -e ".[dev]"` adds the local Dagster UI.
        "dev": [
            "dagster-webserver",
        ]
    },
)
================================================
FILE: outdoor_activity_analytics_recreation/setup.py
================================================
from setuptools import find_packages, setup

# Top-level packaging for the quickstart: pulls in Dagster, its Airbyte/dbt
# integrations, and the dbt BigQuery adapter in one editable install.
setup(
    name="airbyte-dbt-dagster",
    packages=find_packages(),
    install_requires=[
        "dbt-bigquery",
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dagster-airbyte",
    ],
    # `pip install -e ".[dev]"` adds the Dagit UI and pytest.
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: postgres_data_replication/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: postgres_data_replication/README.md
================================================
# Postgres Data Replication Stack
Welcome to the "Postgres Data Replication Stack" repository! This repo provides a quickstart template for building a postgres data replication solution using Airbyte. We will easily synchronize two Postgres databases with Airbyte using Change Data Capture (CDC) and Postgres Write Ahead Log (WAL). While this template doesn't delve into specific data, its goal is to showcase how the data replication solution can be achieved.
This quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Postgres Data Replication Stack](#postgres-data-replication-stack)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add postgres_data_replication
```
2. **Navigate to the directory**:
```bash
cd postgres_data_replication
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or add some transformation logic – the floor is yours. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: postgres_data_replication/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: postgres_data_replication/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs

// Sources
// Primary Postgres database, read via CDC (Write Ahead Log).
// Replace every "...placeholder..." value (or wire them to variables)
// before running `terraform apply`.
resource "airbyte_source_postgres" "postgres" {
  configuration = {
    database    = "...my_database..."
    host        = "...my_host..."
    username    = "...my_username..."
    password    = "...my_password..."
    port        = 5432
    source_type = "postgres"
    schemas = [
      "...my_schema..."
    ]
    ssl_mode = {
      allow = {}
    }
    tunnel_method = {
      no_tunnel = {}
    }
    // CDC requires the publication and replication slot to already exist
    // on the source database.
    replication_method = {
      read_changes_using_write_ahead_log_cdc = {
        publication      = "...pub..."
        replication_slot = "...slot..."
      }
    }
  }
  name         = "Postgres-Primary"
  workspace_id = var.workspace_id
}

// Destinations
// Secondary Postgres database that receives the replicated rows.
resource "airbyte_destination_postgres" "postgres" {
  configuration = {
    database         = "...my_database..."
    destination_type = "postgres"
    host             = "...my_host..."
    jdbc_url_params  = "...my_jdbc_url_params..."
    password         = "...my_password..."
    port             = 5432
    schema           = "public"
    ssl_mode = {
      destination_postgres_ssl_modes_allow = {
        mode = "allow"
      }
    }
    tunnel_method = {
      destination_postgres_ssh_tunnel_method_no_tunnel = {
        tunnel_method = "NO_TUNNEL"
      }
    }
    username = "...my_username..."
  }
  name         = "Postgres-Secondary"
  workspace_id = var.workspace_id
}

// Connections
// Syncs the listed tables on the cron schedule below.
resource "airbyte_connection" "postgres_to_postgres" {
  name           = "Postgres to Postgres"
  source_id      = airbyte_source_postgres.postgres.source_id
  destination_id = airbyte_destination_postgres.postgres.destination_id
  configurations = {
    streams = [
      {
        name = "...my_table_name_1..."
      },
      {
        name = "...my_table_name_2..."
      },
    ]
  }
  schedule = {
    cron_expression = "...my_cron_expression..."
    schedule_type   = "cron"
  }
}
================================================
FILE: postgres_data_replication/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.4"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  // NOTE(review): these are the Airbyte OSS defaults hard-coded for local use;
  // move real credentials into variables or environment configuration.
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1"
}
================================================
FILE: postgres_data_replication/infra/airbyte/variables.tf
================================================
// Airbyte workspace that will own the source, destination, and connection.
variable "workspace_id" {
  type = string
}
================================================
FILE: postgres_data_replication/setup.py
================================================
from setuptools import find_packages, setup

# Minimal packaging for the quickstart: replication itself is handled by
# Airbyte and Terraform, so there are no runtime Python dependencies.
setup(
    name="postgres-data-replication",
    packages=find_packages(),
    install_requires=[
    ],
    # `pip install -e ".[dev]"` adds pytest for local checks.
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: postgres_snowflake_integration/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: postgres_snowflake_integration/README.md
================================================
# Postgres Snowflake Data Integration Stack
Welcome to the "Postgres Snowflake Data Integration Stack" repository! This repo provides a quickstart template for integrating Postgres data into Snowflake warehouses with Airbyte, provisioned via Terraform. We will easily integrate data from Postgres databases with Airbyte using the Terraform Airbyte provider. This template can act as a starter for integrating data, adding new sources, and more — the limits are endless.
This quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Postgres Snowflake Data Integration Stack](#postgres-snowflake-data-integration-stack)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add postgres_snowflake_integration
```
2. **Navigate to the directory**:
```bash
cd postgres_snowflake_integration
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## Next Steps
Once you've set up and launched this initial integration, the real power lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more data sources, integrate additional tools, or add some transformation logic – the floor is yours. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: postgres_snowflake_integration/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: postgres_snowflake_integration/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Postgres source connector. Every "...my_*..." value is a placeholder that
// must be replaced with real connection details before `terraform apply`.
resource "airbyte_source_postgres" "postgres" {
configuration = {
database = "...my_database..."
host = "...my_host..."
username = "...my_username..."
password = "...my_password..."
port = 5432
source_type = "postgres"
// Database schemas Airbyte should discover tables in.
schemas = [
"...my_schema..."
]
// SSL mode "allow": encryption is optional, not enforced.
ssl_mode = {
allow = {}
}
// Connect directly to the database host (no SSH tunnel).
tunnel_method = {
no_tunnel = {}
}
// Log-based CDC: changes are read from the Postgres write-ahead log via the
// named publication and replication slot. NOTE(review): these must already
// exist in the source database — see the Airbyte Postgres CDC setup docs.
replication_method = {
read_changes_using_write_ahead_log_cdc = {
publication = "...pub..."
replication_slot = "...slot..."
}
}
}
name = "Postgres"
// Target Airbyte workspace, supplied via variables.tf / terraform.tfvars.
workspace_id = var.workspace_id
}
// Destinations
// Snowflake destination connector using key-pair authentication. Replace the
// "...my_*..." placeholders and the example host/database/role values.
resource "airbyte_destination_snowflake" "snowflake" {
configuration = {
credentials = {
// Key-pair auth: provide the private key and, if it is encrypted, its passphrase.
destination_snowflake_authorization_method_key_pair_authentication = {
auth_type = "Key Pair Authentication"
private_key = "...my_private_key..."
private_key_password = "...my_private_key_password..."
}
}
database = "AIRBYTE_DATABASE"
destination_type = "snowflake"
// Example account URL — swap in your own account/region/cloud host.
host = "accountname.us-east-2.aws.snowflakecomputing.com"
jdbc_url_params = "...my_jdbc_url_params..."
raw_data_schema = "...my_raw_data_schema..."
role = "AIRBYTE_ROLE"
schema = "AIRBYTE_SCHEMA"
username = "AIRBYTE_USER"
warehouse = "AIRBYTE_WAREHOUSE"
}
name = "Snowflake"
workspace_id = var.workspace_id
}
// Connections
// Connection replicating the streams (tables) listed below from Postgres to
// Snowflake.
resource "airbyte_connection" "postgres_to_snowflake" {
name = "Postgres to Snowflake"
// Wire the connection to the source and destination resources created above.
source_id = airbyte_source_postgres.postgres.source_id
destination_id = airbyte_destination_snowflake.snowflake.destination_id
configurations = {
// Each entry selects one table to replicate. No sync_mode is set here, so
// the provider's defaults apply to every stream.
streams = [
{
name = "...my_table_name_1..."
},
{
name = "...my_table_name_2..."
},
]
}
}
================================================
FILE: postgres_snowflake_integration/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte Terraform provider so `terraform init` resolves a known-good
// version; bump deliberately after reviewing the provider changelog.
terraform {
required_providers {
airbyte = {
source = "airbytehq/airbyte"
version = "0.3.4"
}
}
}
provider "airbyte" {
// If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
// include the actual password/username you've set up (or use the defaults below)
// NOTE(review): these are the well-known OSS defaults. Do not commit real
// credentials — prefer Terraform variables or environment configuration.
username = "airbyte"
password = "password"
// if running locally (Airbyte OSS), include the server url to the airbyte-api-server
server_url = "http://localhost:8006/v1"
}
================================================
FILE: postgres_snowflake_integration/infra/airbyte/variables.tf
================================================
variable "workspace_id" {
  // Documented so `terraform plan` prompts and module docs are self-explanatory.
  description = "ID of the Airbyte workspace in which the source, destination, and connection resources are created."
  type        = string
}
================================================
FILE: postgres_snowflake_integration/setup.py
================================================
"""Packaging script for the postgres-snowflake-integration quickstart."""
from setuptools import find_packages, setup

setup(
    name="postgres-snowflake-integration",
    packages=find_packages(),
    # No runtime dependencies: the heavy lifting is done by Airbyte/Terraform.
    install_requires=[],
    # `pip install -e ".[dev]"` pulls in the test runner.
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: postgres_to_mysql_migration/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: postgres_to_mysql_migration/README.md
================================================
# Postgres to MySQL Database Migration Stack
Welcome to the "Postgres to MySQL Database Migration Stack" repository! This repo provides a quickstart template for building a one-off database migration solution from an existing Postgres database to a MySQL database using Airbyte. We will easily migrate the tables and data from the Postgres database to the MySQL database with Airbyte using Change Data Capture (CDC) and Postgres Write Ahead Log (WAL). While this template doesn't delve into specific data, its goal is to showcase how the database migration solution can be achieved with Airbyte.
Just like other Airbyte quickstarts, this quickstart is designed to minimize setup hassles and propel you forward.
## Table of Contents
- [Postgres to MySQL Database Migration Stack](#postgres-to-mysql-database-migration-stack)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastructure-layout)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Setting Up Airbyte Connectors with Terraform](#2-setting-up-airbyte-connectors-with-terraform)
- [Next Steps](#next-steps)
## Infrastructure Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add postgres_to_mysql_migration
```
2. **Navigate to the directory**:
```bash
cd postgres_to_mysql_migration
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your Postgres and MySQL connections. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the Airbyte UI. Here, you should see your Postgres source and MySQL destination connectors, as well as the connection between them, set up and ready to go.
## Next Steps
Once you've set up and launched this initial integration, you can proceed to sync the connection to trigger a one-off migration. The real power of this quickstart lies in its adaptability and extensibility. Here’s a roadmap to help you customize and harness this project tailored to your specific data needs:
1. **Plan your Migration**:
Ideally, database migration should be a planned activity. Do not run a migration job during a production peak. Migration latency depends on factors such as the size of data to be moved, geographic location of the source and destination, and other parameters. Ensure you test thoroughly before deploying to production.
2. **Extend the Project**:
The real beauty of this integration is its extensibility. Whether you want to add more Postgres sources, migrate to more than one MySQL database, integrate additional tools, or modify the sync schedule – the floor is yours. The granularity of the migration can also be set by selecting the correct sync mode for each stream (table). Read [sync mode](https://docs.airbyte.com/understanding-airbyte/connections/) for more details. With the foundation set, sky's the limit for how you want to extend and refine your data processes.
================================================
FILE: postgres_to_mysql_migration/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: postgres_to_mysql_migration/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Postgres source connector. Every "...my_*..." value is a placeholder that
// must be replaced with real connection details before `terraform apply`.
resource "airbyte_source_postgres" "postgres" {
configuration = {
database = "...my_database..."
host = "...my_host..."
username = "...my_username..."
password = "...my_password..."
port = 5432
source_type = "postgres"
// Database schemas Airbyte should discover tables in.
schemas = [
"...my_schema..."
]
// SSL mode "allow": encryption is optional, not enforced.
ssl_mode = {
allow = {}
}
// Connect directly to the database host (no SSH tunnel).
tunnel_method = {
no_tunnel = {}
}
// Log-based CDC: changes are read from the Postgres write-ahead log via the
// named publication and replication slot. NOTE(review): these must already
// exist in the source database — see the Airbyte Postgres CDC setup docs.
replication_method = {
read_changes_using_write_ahead_log_cdc = {
publication = "...pub..."
replication_slot = "...slot..."
}
}
}
name = "Postgres"
// Target Airbyte workspace, supplied via variables.tf / terraform.tfvars.
workspace_id = var.workspace_id
}
// Destinations
// MySQL destination connector. Replace the "...my_*..." placeholders with real
// connection details before `terraform apply`.
resource "airbyte_destination_mysql" "mysql" {
configuration = {
database = "...my_database..."
destination_type = "mysql"
host = "...my_host..."
jdbc_url_params = "...my_jdbc_url_params..."
password = "...my_password..."
port = 3306
// Connect directly to the database host (no SSH tunnel).
tunnel_method = {
destination_mysql_ssh_tunnel_method_no_tunnel = {
tunnel_method = "NO_TUNNEL"
}
}
username = "...my_username..."
}
name = "MySQL"
workspace_id = var.workspace_id
}
// Connections
// Migration connection from Postgres to MySQL. Each stream maps to one source
// table; choose a sync_mode per table (see the Airbyte sync-mode docs).
resource "airbyte_connection" "postgres_to_mysql" {
name = "Postgres to MySQL"
source_id = airbyte_source_postgres.postgres.source_id
destination_id = airbyte_destination_mysql.mysql.destination_id
configurations = {
streams = [
{
// cursor_field / primary_key are placeholders — set real column names.
cursor_field = ["...",]
name = "...my_table_name_1..."
primary_key = [["...",],]
// Re-reads the table and appends records on every sync.
sync_mode = "full_refresh_append"
},
{
cursor_field = ["...",]
name = "...my_table_name_2..."
primary_key = [["...",],]
// Incremental sync, deduplicated on the primary key.
sync_mode = "incremental_deduped_history"
},
]
}
}
================================================
FILE: postgres_to_mysql_migration/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte Terraform provider so `terraform init` resolves a known-good
// version; bump deliberately after reviewing the provider changelog.
terraform {
required_providers {
airbyte = {
source = "airbytehq/airbyte"
version = "0.3.4"
}
}
}
provider "airbyte" {
// If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
// include the actual password/username you've set up (or use the defaults below)
// NOTE(review): these are the well-known OSS defaults. Do not commit real
// credentials — prefer Terraform variables or environment configuration.
username = "airbyte"
password = "password"
// if running locally (Airbyte OSS), include the server url to the airbyte-api-server
server_url = "http://localhost:8006/v1"
}
================================================
FILE: postgres_to_mysql_migration/infra/airbyte/variables.tf
================================================
variable "workspace_id" {
  // Documented so `terraform plan` prompts and module docs are self-explanatory.
  description = "ID of the Airbyte workspace in which the source, destination, and connection resources are created."
  type        = string
}
================================================
FILE: postgres_to_mysql_migration/setup.py
================================================
"""Packaging script for the postgres-to-mysql-migration quickstart."""
from setuptools import find_packages, setup

setup(
    name="postgres-to-mysql-migration",
    packages=find_packages(),
    # No runtime dependencies: the heavy lifting is done by Airbyte/Terraform.
    install_requires=[],
    # `pip install -e ".[dev]"` pulls in the test runner.
    extras_require={"dev": ["pytest"]},
)
================================================
FILE: pyairbyte_notebooks/AI ChatBot - 1.0 Launch Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
""
]
},
{
"cell_type": "markdown",
"source": [
"▶️ _View our 1.0 Launch Demo [on YouTube](https://www.youtube.com/watch?v=xhui_QDN8Ck)!_"
],
"metadata": {
"id": "Ctm_l6ZWHMuE"
}
},
{
"cell_type": "markdown",
"metadata": {
"id": "BeGeIDzAxbPS"
},
"source": [
"# Airbyte 1.0 Demo: AI ChatBot with PyAirbyte"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "S-54_kh5IBok"
},
"source": [
"This demo shows how **anyone** can quickly build a RAG ChatBot app using PyAirbyte.\n",
"\n",
"\n",
""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "v7Phm4FxnVF5"
},
"source": [
"**Definitions:**\n",
"\n",
"- **Chunking** - Breaking a long text document into smaller \"chunks\".\n",
"- **Embedding** - The process of creating vectors that summarize text or other content.\n",
"- **Vector** - A list of numbers that represent a block of text or other content.\n",
"- **Large language model (LLM)** - An AI model that can predict the best response to a question, given a set of user-provided inputs, called a \"prompt\".\n",
"- **Retrieval-Augmented Generation (RAG)** - The strategy of \"augmenting\" questions sent to the AI by adding relevant context from a vector store lookup.\n",
"- **PGVector** - A popular extension for Postgres, which allows Postgres to store vector embeddings and query them.\n",
"\n",
"_Don't worry if any of these are new to you. Airbyte makes building AI pipelines simple so you can focus on what matters._\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "an3tbYCI5eDl"
},
"source": [
"# ⚙ Prereqs: Setup the Environment"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "VGGynBZnxbPT"
},
"source": [
"## Install Python Libraries\n",
"\n",
"_Installing PyAirbyte is as easy as **`pip install airbyte`!**_"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"collapsed": true,
"id": "rWeXNxcHxbPT",
"outputId": "ec6d3c56-730a-459e-ec1b-401a7ce14bdb"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\r0% [Working]\r \rHit:1 https://cloud.r-project.org/bin/linux/ubuntu jammy-cran40/ InRelease\n",
"Hit:2 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64 InRelease\n",
"Hit:3 http://security.ubuntu.com/ubuntu jammy-security InRelease\n",
"Hit:4 http://archive.ubuntu.com/ubuntu jammy InRelease\n",
"Ign:5 https://r2u.stat.illinois.edu/ubuntu jammy InRelease\n",
"Hit:6 https://r2u.stat.illinois.edu/ubuntu jammy Release\n",
"Hit:7 http://archive.ubuntu.com/ubuntu jammy-updates InRelease\n",
"Hit:8 http://archive.ubuntu.com/ubuntu jammy-backports InRelease\n",
"Hit:10 https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu jammy InRelease\n",
"Hit:11 https://ppa.launchpadcontent.net/graphics-drivers/ppa/ubuntu jammy InRelease\n",
"Hit:12 https://ppa.launchpadcontent.net/ubuntugis/ppa/ubuntu jammy InRelease\n",
"Reading package lists... Done\n",
"W: Skipping acquire of configured file 'main/source/Sources' as repository 'https://r2u.stat.illinois.edu/ubuntu jammy InRelease' does not seem to provide it (sources.list entry misspelt?)\n",
"Requirement already satisfied: uv in /usr/local/lib/python3.10/dist-packages (0.4.16)\n"
]
}
],
"source": [
"# Add virtual environment support for running in Google Colab:\n",
"!apt-get update && apt-get install -qq python3.10-venv\n",
"# Install `uv` to speed up pip installs\n",
"%pip install uv\n",
"\n",
"# Install PyAirbyte and OpenAI\n",
"!uv pip install --system --quiet airbyte\n",
"!uv pip install --system --quiet openai\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "sSaek4Hs7GlC"
},
"source": [
"## Import Python libraries"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "BCwiP6Tg7Dcc",
"outputId": "7ca68881-f255-4fbc-9eae-c64e6dccaa9d"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n",
"Using persistent PyAirbyte cache in Google Drive: `/content/drive/Shareddrives/Company/20_Demos/PyAirbyte Demo/Cache/default_cache.duckdb`.\n"
]
}
],
"source": [
"# Import PyAirbyte and get the cache from Google Drive:\n",
"import airbyte as ab\n",
"colab_cache = ab.get_colab_cache(\n",
" drive_name=\"Company\",\n",
" sub_dir=\"20_Demos/PyAirbyte Demo/Cache\",\n",
")\n",
"\n",
"# Standard inputs:\n",
"from pathlib import Path\n",
"from textwrap import dedent\n",
"\n",
"# For accessing Google Drive:\n",
"from google.colab import drive\n",
"\n",
"# For printing Markdown:\n",
"from rich.markdown import Markdown\n",
"from rich import print\n",
"\n",
"# Import OpenAI and setup connection\n",
"import openai\n",
"openai_client = openai.OpenAI(\n",
" api_key=ab.get_secret(\"OPENAI_API_KEY\"),\n",
")\n",
"\n",
"# For working with SQL:\n",
"from sqlalchemy import create_engine, text, URL\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "cRTjmG8tXIV6"
},
"source": [
"## Connect to Postgres"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "KXF3Bbs-p4US",
"outputId": "4e784dd5-e7fd-486e-c624-76459c13658c"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"The sql extension is already loaded. To reload it, use:\n",
" %reload_ext sql\n"
]
}
],
"source": [
"# Install JupySQL to enable SQL cell magics (https://jupysql.ploomber.io)\n",
"%pip install --quiet jupysql\n",
"# Load JupySQL extension\n",
"%load_ext sql\n",
"# Configure max row limit (optional)\n",
"%config SqlMagic.displaylimit = 200\n",
"\n",
"# Connect to the Postgres instance with PGVector installed\n",
"# Get the SQLAlchemy 'engine' object for the cache\n",
"engine = create_engine(\n",
" URL.create(\n",
" \"postgresql\",\n",
" host=ab.get_secret(\"POSTGRES_HOST\"),\n",
" username=ab.get_secret(\"POSTGRES_USERNAME\"),\n",
" password=ab.get_secret(\"POSTGRES_PASSWORD\"),\n",
" database=\"ai_db\",\n",
" )\n",
")\n",
"\n",
"# Pass the engine to JupySQL\n",
"%sql engine"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "muYWzVjNXV7i"
},
"source": [
"## Install PGVector"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 137
},
"id": "V68iVoc-sWw-",
"outputId": "11e0a9d5-ebd0-4869-dc80-ca4fc5bc1e6c"
},
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
"Running query in 'postgresql://postgres:***@airbyte-ai-demo-01.c7qksqsykgn8.us-east-1.rds.amazonaws.com/ai_db'"
],
"text/html": [
"Running query in 'postgresql://postgres:***@airbyte-ai-demo-01.c7qksqsykgn8.us-east-1.rds.amazonaws.com/ai_db'"
]
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"3 rows affected."
],
"text/html": [
"3 rows affected."
]
},
"metadata": {}
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"+--------------------+\n",
"| schema_name |\n",
"+--------------------+\n",
"| public |\n",
"| information_schema |\n",
"| pg_catalog |\n",
"+--------------------+"
],
"text/html": [
"
\n"
]
},
"metadata": {}
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"[-0.016069870442152023,\n",
" -0.0027824214193969965,\n",
" -0.00472139660269022,\n",
" -0.036299899220466614,\n",
" -0.023806747049093246,\n",
" 0.016577206552028656,\n",
" -0.029070358723402023,\n",
" -0.02003977634012699,\n",
" -0.023337461054325104,\n",
" -0.005200195126235485,\n",
" 0.025303388014435768,\n",
" 0.005558501463383436,\n",
" -0.01047649048268795,\n",
" 0.003928684163838625,\n",
" -0.010134038515388966,\n",
" -0.015499117784202099,\n",
" 0.04373237118124962,\n",
" -0.009227175265550613,\n",
" 0.008003227412700653,\n",
" -0.014649329707026482,\n",
" '...']"
]
},
"metadata": {},
"execution_count": 20
}
],
"source": [
"# @title Test the vector embeddings function {\"run\":\"auto\",\"vertical-output\":true}\n",
"question = \"How are you?\" # @param [\"What new features have users requested for postgres?\",\"What features have been added recently for HubSpot?\",\"Tell me about PGVector.\"] {\"allow-input\":true}\n",
"question_vector = get_vector(question)\n",
"\n",
"print(f\"\"\"\n",
"Question:\n",
"\"{question}\"\n",
"\n",
"Calculated Vector (len={len(question_vector)}):\n",
"\"\"\")\n",
"question_vector[:20]+[\"...\"]"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "RgKK1feQJmKh"
},
"source": [
"## B. Find relevant **document chunks** in our knowlege base"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {
"id": "6xTZ1hRTxbPV"
},
"outputs": [],
"source": [
"RAG_SQL_QUERY = \"\"\"\n",
"SELECT document_content, metadata->'number' as number\n",
"FROM issues\n",
"ORDER BY embedding <-> '{question_vector}'\n",
"LIMIT 15\n",
"\"\"\"\n",
"\n",
"# Markdown divider\n",
"HORIZONTAL_DIV = \"\\n\\n-------\\n\\n\"\n",
"\n",
"def get_related_context(question) -> str:\n",
" # Create a SQL query with our question's vector:\n",
" sql_query = RAG_SQL_QUERY.format(\n",
" question_vector=get_vector(question)\n",
" )\n",
" with engine.begin() as connection:\n",
" # Run the query and return results as a single string:\n",
" return (HORIZONTAL_DIV).join(\n",
" [\n",
" f\"Issue {row.number}: {row.document_content}\"\n",
" for row in connection.execute(text(sql_query))\n",
" ]\n",
" )\n"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
},
"id": "bIZKff1w9QUU",
"outputId": "bf882128-de0a-4f19-b043-940474adba2a"
},
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
"\u001b[1mQuestion: \"What features have been added recently for HubSpot?\"\u001b[0m \n",
"\n",
"Related Context: \n",
"\n",
"\u001b[33m───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\u001b[0m\n",
"Issue 43317: HubSpot recently began running a \"Batch Update\" process at the end of the month (to reset marketing \n",
"status, etc) which sets all affected records to the exact same timestamp. See example below: \n",
"\n",
"\u001b[33m───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\u001b[0m\n",
"Issue 44481: title: ✨Source Hubspot: Add Leads Stream body: \n",
"\n",
"\u001b[33m───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\u001b[0m\n",
"Issue 36435: title: ✨ Source Linkedin-ads: add new streams (Leads and Forms) body: ## Overview \n"
],
"text/html": [
"
Question: \"What features have been added recently for HubSpot?\" \n",
"\n",
"Related Context: \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 43317: HubSpot recently began running a \"Batch Update\" process at the end of the month (to reset marketing \n",
"status, etc) which sets all affected records to the exact same timestamp. See example below: \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 44481: title: ✨Source Hubspot: Add Leads Stream body: <!-- Thanks for your contribution! Before you submit \n",
"the pull request, I'd like to kindly remind you to take a moment and read through our guidelines to ensure that \n",
"your contribution aligns with the type of contributions our project accepts. All the information you need can be \n",
"found here: \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 43317: Since HubSpot won't let you pull more than 10k search results at a time, the connector grabs the last \n",
"updated time of the last 10k received and uses that timestamp as a starting point to pull the next 10k. For \n",
"example, if the first query \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 44916: I am a paid customer on airbyte cloud and load various data from hubspot/ga4/github into our own data \n",
"platform: timeplus.com \n",
"\n",
"I wrote the destination-timeplus more than 1 year ago and released 0.1.0. After that it seems that airbyte \n",
"engineering team made some update. The latest version is 0.1.17. However in airbyte cloud, it remains 0.1.0 \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 36435: ## Changes Below is a detailed overview of the changes included in this pull request: \n",
"\n",
" Added Form Stream \n",
"\n",
" • Implemented a new stream for ingesting form data, enabling us to capture and integrate user submission data \n",
"directly into our system. This addition is crucial for analytics and monitoring user interactions, providing \n",
"valuable insights into user behavior and preferences. \n",
"\n",
" Added Lead Stream \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 45018: title: 🐙 source-hubspot: run up-to-date pipeline [2024-09-07] body: # Update source-hubspot \n",
"\n",
"This PR was autogenerated by running airbyte-ci connectors --name=source-hubspot up_to_date --pull \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 44894: During the Sync \n",
"\n",
" Relevant information \n",
"\n",
"\n",
"I have connect source from hubspot to S3 while I sync with S3 I got issue to upload data \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 44191: Currenly working with v 3.3.4 google ads connector. \n",
"\n",
"Thanks! \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 45391: I would like to add two new streams in source zendesk support: \n",
"\n",
" • Categories \n",
" • Sections \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 37775: - Connector version is set to 0.1.1 \n",
"\n",
" • Documentation updated \n",
" • docs/integrations/source/partnerstack.md including changelog with an entry for the initial version. \n",
" • docs/integrations/README.md \n",
"\n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 43712: - in OAuth mode, there is also a need for TenantId field \n",
"\n",
"P.S. I am able to connect with V 1.1.0 but I would like to use refresh feature and incremental deduplication \n",
"features in V3. \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 39114: 1. Connect Hubspot 2. Select contacts_form_submissions 3. Launch it as full refresh The table is \n",
"created in the DW with the right columns but it's empty. Our Hubspot is 6 years old and contain many form \n",
"submissions. \n",
"\n",
" Relevant log output \n",
"\n",
"\n",
"\n",
"\n",
"\n",
" Contribute \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 44800: ### Relevant information \n",
"\n",
"\n",
"We can add webhook urls from the airbyte cloud. Can we also add that through powered by airbyte? Please update \n",
"powered by airbyte with this endpoint as well. \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 21123: <!--- We accept contributions! Don't feel pressured, but if you want to contribute we can help you by \n",
"giving some tips, highlighting the necessary code change or explaining any relevant point your feature will impact.\n",
"You can also send questions on #dev Slack channel. \n",
"\n",
"We understand if you can't submit a PR and we're tremendously grateful that you've already contributed by \n",
"suggesting a new feature. --> \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"Issue 36435: title: ✨ Source Linkedin-ads: add new streams (Leads and Forms) body: ## Overview \n",
"
\n"
]
},
"metadata": {}
}
],
"source": [
"# @title Test the RAG lookup function {\"run\":\"auto\",\"vertical-output\":true}\n",
"question = \"What features have been added recently for HubSpot?\" # @param [\"What new features have users requested for postgres?\",\"What features have been added recently for HubSpot?\",\"Tell me about PGVector.\"] {\"allow-input\":true}\n",
"context = get_related_context(question)\n",
"\n",
"print(Markdown(f\"\"\"\n",
"**Question: \"{question}\"**\n",
"\n",
"Related Context:\n",
"\n",
"{HORIZONTAL_DIV}\n",
"\n",
"{context}\n",
"\"\"\"))\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "OKBTDbhCxbPV"
},
"source": [
"## C. Call the **large language model** to answer our questions"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "NyOenZWrxbPW"
},
"source": [
"The large language model (LLM) can answer questions if we write them out in english.\n",
"\n",
"When we send the question to the LLM we'll include the below instructions, along with any context we can find that might be helpful.\n"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {
"id": "7nSWX0m8nKZT"
},
"outputs": [],
"source": [
"# We specify our instructions to the LLM in plain English.\n",
"\n",
"PROMPT_TEMPLATE = \"\"\"\n",
"You are an AI assistant. You are able to find answers to the questions\n",
"from the contextual passage snippets provided.\n",
"\n",
"Use the following pieces of information enclosed in tags to provide an\n",
"answer to the question enclosed in tags.\n",
"\n",
"Please provide your answer using markdown, and bullets when appropriate. The\n",
"context will be a set of excerpts from GitHub issues and pull requests. In your\n",
"answer, you should list the issue number that was helpful in answering the\n",
"question.\n",
"\n",
"\n",
"{context}\n",
"\n",
"\n",
"\n",
"{question}\n",
"\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {
"id": "9jTp54NWxbPW"
},
"outputs": [],
"source": [
"# To get an answer we send our prompt to the LLM after\n",
"# populating it with the specific question and the context\n",
"# that we found.\n",
"\n",
"def get_answer(question) -> None:\n",
" answer_text = openai_client.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
" messages=[\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": PROMPT_TEMPLATE.format(\n",
" question=question,\n",
" context=get_related_context(question),\n",
" )\n",
" },\n",
" ],\n",
" ).choices[0].message.content\n",
"\n",
" print(Markdown(\n",
" f'Question: \"{question}\" {HORIZONTAL_DIV}' +\n",
" answer_text\n",
" ))\n"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 113
},
"id": "Z__nLYZBxbPW",
"outputId": "bb4efb93-c941-4a41-a3fc-05fe58b34148"
},
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
"Question: \"What features have been added recently for HubSpot?\" \n",
"\n",
"\u001b[33m───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\u001b[0m\n",
"\n",
"\u001b[1;33m • \u001b[0mAdded Form Stream for ingesting form data for analytics and user interaction insights (Issue 36435) \n",
"\u001b[1;33m • \u001b[0mAdded Lead Stream (Issue 36435) \n"
],
"text/html": [
"
Question: \"What features have been added recently for HubSpot?\" \n",
"\n",
"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n",
"\n",
" • Added Form Stream for ingesting form data for analytics and user interaction insights (Issue 36435) \n",
" • Added Lead Stream (Issue 36435) \n",
"
\n"
]
},
"metadata": {}
}
],
"source": [
"# @title Ask the AI Bot! 🤖 {\"run\":\"auto\",\"vertical-output\":true}\n",
"\n",
"question = \"What features have been added recently for HubSpot?\" # @param [\"What new features have users requested for postgres?\",\"What features have been added recently for HubSpot?\",\"Tell me about PGVector.\"] {\"allow-input\":true}\n",
"\n",
"get_answer(question)\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "5Cx1x0WqxbPW"
},
"source": [
"# ⭐ RECAP and Next Steps\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "x4jU6bm_UY4W"
},
"source": [
"In this demo, we've shown how to build your own AI app with Airbyte. Using Airbyte and PyAirbyte, you can reach data from hundreds of data sources and load to any destination - whether a vector store, data lake, or a SQL warehouse.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "hr6ICbeHRoVs"
},
"source": [
"\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "A4vnCILIaZ79"
},
"source": [
"## Before we go..."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "SzviOha8Xtp8"
},
"source": [
"👀 _A few things to know about **PyAirbyte** and our **Airbyte Connectors for AI**..._\n",
"\n",
"### Airbyte **Vector Store** Destinations\n",
"\n",
"- **Fully automated embedding, chunking, deduping.**\n",
"- Support for **all major embeddings providers**, including self-hosted embedding services.\n",
"- Support for **all major RAG vector store providers**, including purpose-built vector stores like **Pinecone**, **Chroma**, **Vectara**, and **Milvus**, as well as hybrid-SQL vector stores like **PGVector** and **Snowflake Cortex**.\n",
"\n",
"### Airbyte **Stuctured and Unstructured** Sources\n",
"\n",
"- Can **scrape text from unstructures** sources like PDFs, Google Docs, Word Docs, and many more.\n",
"- Support for **all major cloud storage providers**: S3, Azure, Google Drive, Google Cloud, SharePoint, and more.\n",
"- Support for **hundreds of REST APIs, web services, and databases**, as well as **custom connectors** with the **Airbyte Connector Builder** and **AI Assistant** ✨.\n",
"\n",
"### PyAirbyte Gives Control and Flexibility\n",
"\n",
"- Brings the **full set of Airbyte connectors to Python** so you can experiment and iterate **locally**, with full control over execution.\n",
"- Integrated with **popular AI and Analysis tools**: LangChain, LlamaIndex, Pandas, Apache Arrow, and more.\n",
"- Pipelines can be **easily deployed** to Python runtimes, or promoted to **Airbyte Cloud, Enterprise, or OSS!** 🚀\n"
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [
"BeGeIDzAxbPS",
"VGGynBZnxbPT",
"sSaek4Hs7GlC",
"cRTjmG8tXIV6",
"muYWzVjNXV7i",
"q_rCb2vPJ3qb",
"SzviOha8Xtp8"
],
"provenance": [],
"include_colab_link": true
},
"kernelspec": {
"display_name": "pyairbyte-hackathon",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.0"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/Chatoverpolygonstockdata_langchain.ipynb
================================================
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"## Install PyAirbyte and other dependencies"
],
"metadata": {
"id": "YKwX9XL1aHrY"
}
},
{
"cell_type": "code",
"source": [
"!pip3 install airbyte langchain_openai langchain-experimental\n"
],
"metadata": {
"id": "9j9kjtEFaKxW"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## Polygon Airbyte Datasource"
],
"metadata": {
"id": "Q--tVuoMsGU5"
}
},
{
"cell_type": "code",
"source": [
"import airbyte as ab\n",
"\n",
"source = ab.get_source(\n",
" \"source-polygon-stock-api\",\n",
" install_if_missing=True,\n",
" config={\n",
" \"apiKey\": ab.get_secret(\"POLYGON_API_KEY\"),\n",
" \"stocksTicker\": \"AAPL\" ,\n",
" \"multiplier\": 1,\n",
" \"timespan\": \"day\",\n",
" \"start_date\": \"2023-01-01\",\n",
" \"end_date\": \"2023-12-31\",\n",
" \"adjusted\": \"true\"\n",
" },\n",
"\n",
")\n",
"\n",
"# Verify the config and creds by running `check`:\n",
"source.check()"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"id": "6e5bD7A8cBEx",
"outputId": "c7a57019-7623-401f-b529-aaa16a69a762"
},
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
"Connection check succeeded for `source-polygon-stock-api`.\n"
],
"text/html": [
"
Connection check succeeded for `source-polygon-stock-api`.\n",
"
\n"
]
},
"metadata": {}
}
]
},
{
"cell_type": "code",
"source": [
"source.select_streams(['stock_api']) # Select only issues stream\n",
"read_result: ab.ReadResult = source.read()"
],
"metadata": {
"id": "EbFErEv8kgzw",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 303
},
"outputId": "54d919d4-c155-4dd3-b0c1-19bbb26e56f5"
},
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
""
],
"text/markdown": "## Read Progress\n\nStarted reading at 17:27:59.\n\nRead **120** records over **3 seconds** (40.0 records / second).\n\nWrote **120** records over 1 batches.\n\nFinished reading at 17:28:03.\n\nStarted finalizing streams at 17:28:03.\n\nFinalized **1** batches over 1 seconds.\n\nCompleted 1 out of 1 streams:\n\n - stock_api\n\n\nCompleted writing at 17:28:05. Total time elapsed: 5 seconds\n\n\n------------------------------------------------\n"
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"Completed `source-polygon-stock-api` read operation at \u001b[1;92m17:28:05\u001b[0m.\n"
],
"text/html": [
"
Completed `source-polygon-stock-api` read operation at 17:28:05.\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "stock_data",
"summary": "{\n \"name\": \"stock_data\",\n \"rows\": 120,\n \"fields\": [\n {\n \"column\": \"Close\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 15.539265956564279,\n \"min\": 125.02,\n \"max\": 187.0,\n \"num_unique_values\": 118,\n \"samples\": [\n 160.25,\n 172.57,\n 130.15\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"High\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 15.372639153874227,\n \"min\": 127.77,\n \"max\": 188.05,\n \"num_unique_values\": 119,\n \"samples\": [\n 160.34,\n 174.59,\n 133.41\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Low\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 15.741200261457518,\n \"min\": 124.17,\n \"max\": 185.23,\n \"num_unique_values\": 120,\n \"samples\": [\n 151.83,\n 147.7,\n 129.89\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Open\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 15.608952409500736,\n \"min\": 126.01,\n \"max\": 186.83,\n \"num_unique_values\": 119,\n \"samples\": [\n 158.86,\n 173.62,\n 130.465\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Timestamp\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 4351425628,\n \"min\": 1672722000000,\n \"max\": 1687752000000,\n \"num_unique_values\": 120,\n \"samples\": [\n 1678251600000,\n 1678680000000,\n 1673240400000\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Volume\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 18456132.44353353,\n \"min\": 37264259.0,\n \"max\": 154338835.0,\n \"num_unique_values\": 120,\n \"samples\": [\n 47204791.0,\n 84457122.0,\n 70790813.0\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"VWAP\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 15.589473557357758,\n \"min\": 125.725,\n \"max\": 186.4952,\n \"num_unique_values\": 
120,\n \"samples\": [\n 152.6973,\n 151.1835,\n 131.6292\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}"
}
},
"metadata": {}
}
]
},
{
"cell_type": "markdown",
"source": [
"## Chat with your data"
],
"metadata": {
"id": "b91ASLVSsKwx"
}
},
{
"cell_type": "code",
"source": [
"from langchain.agents.agent_types import AgentType\n",
"from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent\n",
"from langchain_openai import ChatOpenAI\n",
"from IPython.display import display"
],
"metadata": {
"id": "K_vft4losRiO"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import os\n",
"\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"OPENAI_API_KEY\")\n",
"agent = create_pandas_dataframe_agent(\n",
" ChatOpenAI(model=\"gpt-3.5-turbo\"),\n",
" stock_data,\n",
" verbose=True,\n",
" agent_type=AgentType.OPENAI_FUNCTIONS,\n",
" )"
],
"metadata": {
"id": "VjP_PfhEscBk"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"output = agent.invoke(\"what is the given data is about?\")\n",
"print(output['output'])"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "K8lQaOretq5S",
"outputId": "76ea1ddc-f3bb-4db6-bf7f-87b00747b804"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mBased on the columns in the dataframe `df`, the data appears to be financial market data related to trading. Here are the columns and their potential meanings:\n",
"\n",
"- `Close`: Closing price of a financial asset\n",
"- `High`: High price of a financial asset during a specific time period\n",
"- `Low`: Low price of a financial asset during a specific time period\n",
"- `Open`: Opening price of a financial asset\n",
"- `Timestamp`: Timestamp of the data point\n",
"- `Volume`: Volume of the financial asset traded\n",
"- `VWAP`: Volume Weighted Average Price\n",
"\n",
"Therefore, the data in the dataframe `df` seems to represent price and volume data of a financial asset over different timestamps.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Based on the columns in the dataframe `df`, the data appears to be financial market data related to trading. Here are the columns and their potential meanings:\n",
"\n",
"- `Close`: Closing price of a financial asset\n",
"- `High`: High price of a financial asset during a specific time period\n",
"- `Low`: Low price of a financial asset during a specific time period\n",
"- `Open`: Opening price of a financial asset\n",
"- `Timestamp`: Timestamp of the data point\n",
"- `Volume`: Volume of the financial asset traded\n",
"- `VWAP`: Volume Weighted Average Price\n",
"\n",
"Therefore, the data in the dataframe `df` seems to represent price and volume data of a financial asset over different timestamps.\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"output = agent.invoke(\"what is the yearly returns of the given stock?\")\n",
"print(output['output'])"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "zb9B_glfuYN0",
"outputId": "4f91af29-f371-4c60-b9e5-92e44884b9df"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `python_repl_ast` with `{'query': \"df['Year'] = pd.to_datetime(df['Timestamp'], unit='ms').dt.year\\nyearly_returns = df.groupby('Year')['Close'].last() / df.groupby('Year')['Close'].first() - 1\\nyearly_returns\"}`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3mYear\n",
"2023 0.48133\n",
"Name: Close, dtype: float64\u001b[0m\u001b[32;1m\u001b[1;3mThe yearly returns of the given stock for the year 2023 are approximately 48.13%.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"The yearly returns of the given stock for the year 2023 are approximately 48.13%.\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"output = agent.invoke(\"what is the 50-day moving average of the entire data?\")\n",
"print(output['output'])"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "pf41wXkuumnp",
"outputId": "59088435-3391-4d64-ec84-87429e47eda5"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `python_repl_ast` with `{'query': \"df['Close'].rolling(window=50).mean().iloc[-1]\"}`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3m174.67870000000002\u001b[0m\u001b[32;1m\u001b[1;3mThe 50-day moving average of the entire data is approximately 174.68.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"The 50-day moving average of the entire data is approximately 174.68.\n"
]
}
]
}
]
}
================================================
FILE: pyairbyte_notebooks/PyAirbyte_Apify_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "3s3Jow7_BjQi"
},
"source": [
"# Scraping Web Data from Apify Source into Airbyte for LangChain\n",
"\n",
"This tutorial will demonstrate how to scrape data from a website using Apify, load the scraped data using PyAirbyte, and store the data in a database using LangChain.\n",
"Integrating web data into LLMs can enhance their performance by providing up-to-date and relevant information. This process can be complex, and this guide aims to simplify it for users.\n",
"\n",
"## Prerequisites\n",
"\n",
"1. **Apify Account**:\n",
" - Follow the instructions in the [Apify](https://docs.airbyte.com/integrations/sources/apify-dataset) to set up your apify account and obtain the necessary access keys.\n",
"\n",
"2. **Pinecone Account**:\n",
" - **Create a Pinecone Account**: Sign up for an account on the [Pinecone website](https://www.pinecone.io/).\n",
" - **Obtain Pinecone API Key**: Generate a new API key from your Pinecone project settings. For detailed instructions, refer to the [Pinecone documentation](https://docs.pinecone.io/docs/quickstart).\n",
"\n",
"3. **OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://platform.openai.com/docs/overview).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://platform.openai.com/api-keys).\n",
"\n",
"\n",
"## Install PyAirbyte and other dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ij3THvimBjQk"
},
"outputs": [],
"source": [
"# Add virtual environment support in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# First, we need to install the necessary libraries.\n",
"!pip3 install airbyte openai langchain pinecone-client langchain-openai langchain-pinecone python-dotenv langchainhub"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8jDsEZdiBjQl"
},
"source": [
"## Setup Source Apify with PyAirbyte\n",
"\n",
"The provided code configures an Airbyte source to extract data from specific dataset in apify.\n",
"\n",
"To configure according to your requirements, you can refer to [this references](https://docs.airbyte.com/integrations/sources/apify-dataset#reference).\n",
"\n",
"Note: The credentials are retrieved securely using the get_secret() method. This will automatically locate a matching Google Colab secret or environment variable, ensuring they are not hard-coded into the notebook. Make sure to add your key to the Secrets section on the left.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "U7DxyLVUBjQl"
},
"outputs": [],
"source": [
"import airbyte as ab\n",
"\n",
"source = ab.get_source(\n",
" \"source-apify-dataset\",\n",
" config={\n",
" \"token\": ab.get_secret(\"API_TOKEN\"),\n",
" \"dataset_id\": ab.get_secret(\"DATASET_ID\"),\n",
" }\n",
")\n",
"source.check()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "rtSCjGcQBjQl"
},
"source": [
"This is a basic process of fetching data from Apify dataset using Airbyte and converting it into a format suitable for further processing or analysis."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "mlg7K8GUBjQm"
},
"outputs": [],
"source": [
"source.select_all_streams() # Select all streams\n",
"read_result = source.read() # Read the data\n",
"review_list = [doc for doc in read_result[\"item_collection\"].to_documents()] # We are only intrested in item_collection stream only\n",
"\n",
"print(str(review_list[10]))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "L6KM7KPMBjQm"
},
"source": [
"# Use Langchain to build a RAG pipeline.\n",
"\n",
"The code uses RecursiveCharacterTextSplitter to break documents into smaller chunks. Metadata within these chunks is converted to strings. This facilitates efficient processing of large texts, enhancing analysis capabilities."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "LXvSJoUSBjQm",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "eb25babf-f917-4f1d-8970-5e8acd132776"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Created 493 document chunks.\n"
]
}
],
"source": [
"\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)\n",
"chunked_docs = splitter.split_documents(review_list)\n",
"print(f\"Created {len(chunked_docs)} document chunks.\")\n",
"\n",
"for doc in chunked_docs:\n",
" for md in doc.metadata:\n",
" doc.metadata[md] = str(doc.metadata[md])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "kCF7gZTMBjQm"
},
"outputs": [],
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"import os\n",
"\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"OPENAI_API_KEY\")\n",
"## Embedding Technique Of OPENAI\n",
"embeddings=OpenAIEmbeddings()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "5TKCIAhYBjQm"
},
"source": [
"## Setting up Pinecone\n",
"\n",
"Pinecone is a managed vector database service designed for storing, indexing, and querying high-dimensional vector data efficiently."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "iXb5YPnhBjQn"
},
"outputs": [],
"source": [
"from pinecone import Pinecone, ServerlessSpec\n",
"from pinecone import Pinecone\n",
"\n",
"os.environ['PINECONE_API_KEY'] = ab.get_secret(\"PINECONE_API_KEY\")\n",
"pc = Pinecone()\n",
"index_name = \"apifyproductreview\" # Replace with your index name\n",
"\n",
"\n",
"# Uncomment this if you have not created a Pinecone index yet\n",
"\n",
"spec = ServerlessSpec(cloud=\"aws\", region=\"us-east-1\") # Replace with your cloud and region\n",
"pc.create_index(\n",
" name = index_name,\n",
" dimension=1536, # Replace with your model dimensions\n",
" metric='cosine', # Replace with your model metric\n",
" spec=spec\n",
")\n",
"\n",
"index = pc.Index(index_name)\n",
"\n",
"index.describe_index_stats()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6kRv6s7zBjQn"
},
"source": [
"PineconeVectorStore is a class provided by the LangChain library specifically designed for interacting with Pinecone vector stores.\n",
"from_documents method of PineconeVectorStore is used to create or update vectors in a Pinecone vector store based on the provided documents and their corresponding embeddings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "3hToKOPsBjQn"
},
"outputs": [],
"source": [
"from langchain_pinecone import PineconeVectorStore\n",
"\n",
"pinecone = PineconeVectorStore.from_documents(\n",
" chunked_docs, embeddings, index_name=index_name\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "vj0hSWo2BjQn"
},
"source": [
"Now setting up a pipeline for RAG using LangChain, incorporating document retrieval from Pinecone, prompt configuration, and a chat model from OpenAI for response generation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "N0gE_LbmBjQn"
},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain import hub\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"retriever = pinecone.as_retriever()\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"rag_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"print(\"Langchain RAG pipeline set up successfully.\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "axbwi9j8BjQn"
},
"outputs": [],
"source": [
"print(rag_chain.invoke(\"What is overall review of products\"))\n"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "myenv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/PyAirbyte_Basic_Features_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "view-in-github"
},
"source": [
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "R8XtHKK4PujA"
},
"source": [
"# PyAirbyte Demo\n",
"\n",
"Below is a pre-release demo of PyAirbyte.\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Lyxh2NLuQJUf"
},
"source": [
"## Install PyAirbyte\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "9DEgu1WpQNt-"
},
"outputs": [],
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# Install PyAirbyte\n",
"%pip install --quiet airbyte"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "cXJ_cRQV7nIb"
},
"source": [
"## Locating your Data Source\n",
"\n",
"To see what data sources are available, you can check [our docs](https://docs.airbyte.com/using-airbyte/airbyte-lib/getting-started) or run the following:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "tfjct5EQ7nIb"
},
"outputs": [],
"source": [
"# Import PyAirbyte\n",
"import airbyte as ab\n",
"\n",
"# Show all available connectors\n",
"ab.get_available_connectors()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "JWWeEbTVEDFz"
},
"source": [
"## Load the Source Data using PyAirbyte\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "1PhfWpS8QVzE"
},
"source": [
"Create and install a source connector:\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 129
},
"id": "5BI9hIeUvxXE",
"outputId": "4d674fb3-25f1-400e-f2e6-c4160cb245e4"
},
"outputs": [
{
"data": {
"text/html": [
"
Connector 'source-faker' installed successfully!\n",
"For more information, see the source-faker documentation:\n",
"https://docs.airbyte.com/integrations/sources/faker#reference\n",
"\n",
"
Connection check succeeded for `source-faker`.\n",
"
\n"
],
"text/plain": [
"Connection check succeeded for `source-faker`.\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Configure the source\n",
"source.set_config(\n",
" config={\n",
" \"count\": 50_000, # Adjust this to get a larger or smaller dataset\n",
" \"seed\": 123,\n",
" },\n",
")\n",
"# Verify the config and creds by running `check`:\n",
"source.check()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Que5DAqtEJu0"
},
"source": [
"## Read Data from the PyAirbyte Cache\n",
"\n",
"Once data is read, we can do anything we want to with the resulting streams. This includes `to_pandas()` which registers a Pandas dataframe and `to_sql_table()` which gives us a SQLAlchemy `Table` boject, which we can use to run SQL queries.\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 337
},
"id": "qQVRO69c2DoA",
"outputId": "b94fa743-ed48-4242-9344-ee60cee2f225"
},
"outputs": [
{
"data": {
"text/markdown": [
"## Read Progress\n",
"\n",
"Started reading at 20:10:41.\n",
"\n",
"Read **100,100** records over **1min 17s** (1,300.0 records / second).\n",
"\n",
"Wrote **100,100** records over 11 batches.\n",
"\n",
"Finished reading at 20:11:59.\n",
"\n",
"Started finalizing streams at 20:11:59.\n",
"\n",
"Finalized **11** batches over 1 seconds.\n",
"\n",
"Completed 3 out of 3 streams:\n",
"\n",
" - users\n",
" - purchases\n",
" - products\n",
"\n",
"\n",
"Completed writing at 20:12:01. Total time elapsed: 1min 19s\n",
"\n",
"\n",
"------------------------------------------------\n"
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"
Completed `source-faker` read operation at 20:12:01.\n",
"
\n"
],
"text/plain": [
"Completed `source-faker` read operation at \u001b[1;92m20:12:01\u001b[0m.\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Select all of the source's streams and read data into the internal cache:\n",
"source.select_all_streams()\n",
"read_result: ab.ReadResult = source.read()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 424
},
"id": "gRuGVOoDEw1R",
"outputId": "8c221488-54b7-4eed-a882-dc6d776d7b8e"
},
"outputs": [
{
"data": {
"text/html": [
"\n",
"
\n",
"
\n",
"\n",
"
\n",
" \n",
"
\n",
"
\n",
"
id
\n",
"
make
\n",
"
model
\n",
"
year
\n",
"
price
\n",
"
created_at
\n",
"
updated_at
\n",
"
\n",
" \n",
" \n",
"
\n",
"
0
\n",
"
1
\n",
"
Mazda
\n",
"
MX-5
\n",
"
2008
\n",
"
2869.0
\n",
"
2022-02-01 17:02:19
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
"
\n",
"
1
\n",
"
2
\n",
"
Mercedes-Benz
\n",
"
C-Class
\n",
"
2009
\n",
"
42397.0
\n",
"
2021-01-25 14:31:33
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
"
\n",
"
2
\n",
"
3
\n",
"
Honda
\n",
"
Accord Crosstour
\n",
"
2011
\n",
"
63293.0
\n",
"
2021-02-11 05:36:03
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
"
\n",
"
3
\n",
"
4
\n",
"
GMC
\n",
"
Jimmy
\n",
"
1998
\n",
"
34079.0
\n",
"
2022-01-24 03:00:03
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
"
\n",
"
4
\n",
"
5
\n",
"
Infiniti
\n",
"
FX
\n",
"
2004
\n",
"
17036.0
\n",
"
2021-10-02 03:55:44
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
"
\n",
"
...
\n",
"
...
\n",
"
...
\n",
"
...
\n",
"
...
\n",
"
...
\n",
"
...
\n",
"
...
\n",
"
\n",
"
\n",
"
95
\n",
"
96
\n",
"
BMW
\n",
"
330
\n",
"
2006
\n",
"
14494.0
\n",
"
2021-09-17 20:52:48
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
"
\n",
"
96
\n",
"
97
\n",
"
Audi
\n",
"
R8
\n",
"
2008
\n",
"
17642.0
\n",
"
2021-09-21 11:56:24
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
"
\n",
"
97
\n",
"
98
\n",
"
Cadillac
\n",
"
CTS-V
\n",
"
2007
\n",
"
19914.0
\n",
"
2021-09-02 15:38:46
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
"
\n",
"
98
\n",
"
99
\n",
"
GMC
\n",
"
1500 Club Coupe
\n",
"
1997
\n",
"
82288.0
\n",
"
2021-04-20 18:58:15
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
"
\n",
"
99
\n",
"
100
\n",
"
Buick
\n",
"
Somerset
\n",
"
1986
\n",
"
64148.0
\n",
"
2021-06-10 19:07:38
\n",
"
2024-02-12 20:10:42
\n",
"
\n",
" \n",
"
\n",
"
100 rows × 7 columns
\n",
"
\n",
"
\n",
"\n",
"
\n",
" \n",
"\n",
" \n",
"\n",
" \n",
"
\n",
"\n",
"\n",
"
\n",
" \n",
"\n",
"\n",
"\n",
" \n",
"
\n",
"
\n",
"
\n"
],
"text/plain": [
" id make model year price created_at \\\n",
"0 1 Mazda MX-5 2008 2869.0 2022-02-01 17:02:19 \n",
"1 2 Mercedes-Benz C-Class 2009 42397.0 2021-01-25 14:31:33 \n",
"2 3 Honda Accord Crosstour 2011 63293.0 2021-02-11 05:36:03 \n",
"3 4 GMC Jimmy 1998 34079.0 2022-01-24 03:00:03 \n",
"4 5 Infiniti FX 2004 17036.0 2021-10-02 03:55:44 \n",
".. ... ... ... ... ... ... \n",
"95 96 BMW 330 2006 14494.0 2021-09-17 20:52:48 \n",
"96 97 Audi R8 2008 17642.0 2021-09-21 11:56:24 \n",
"97 98 Cadillac CTS-V 2007 19914.0 2021-09-02 15:38:46 \n",
"98 99 GMC 1500 Club Coupe 1997 82288.0 2021-04-20 18:58:15 \n",
"99 100 Buick Somerset 1986 64148.0 2021-06-10 19:07:38 \n",
"\n",
" updated_at \n",
"0 2024-02-12 20:10:42 \n",
"1 2024-02-12 20:10:42 \n",
"2 2024-02-12 20:10:42 \n",
"3 2024-02-12 20:10:42 \n",
"4 2024-02-12 20:10:42 \n",
".. ... \n",
"95 2024-02-12 20:10:42 \n",
"96 2024-02-12 20:10:42 \n",
"97 2024-02-12 20:10:42 \n",
"98 2024-02-12 20:10:42 \n",
"99 2024-02-12 20:10:42 \n",
"\n",
"[100 rows x 7 columns]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Display or transform the loaded data\n",
"products_df = read_result[\"products\"].to_pandas()\n",
"display(products_df)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "VJgl59hBF1rJ"
},
"source": [
"## Creating graphs\n",
"\n",
"PyAirbyte integrates with Pandas, which integrates with `matplotlib` as well as many other popular libraries. We can use this as a means of quickly creating graphs.\n"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 718
},
"id": "XcQcBlYjF3oO",
"outputId": "b8b2ae5f-605d-485a-a3c7-8bf72fd826a9"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (3.7.1)\n",
"Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.2.0)\n",
"Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (0.12.1)\n",
"Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (4.48.1)\n",
"Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.4.5)\n",
"Requirement already satisfied: numpy>=1.20 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.23.5)\n",
"Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (23.2)\n",
"Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (9.4.0)\n",
"Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (3.1.1)\n",
"Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (2.8.2)\n",
"Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/usr/local/lib/python3.10/dist-packages/duckdb_engine/__init__.py:178: DuckDBEngineWarning: duckdb-engine doesn't yet support reflection on indices\n",
" warnings.warn(\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAkQAAAHHCAYAAABeLEexAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA/LUlEQVR4nO3deVhWdf7/8dctuwvgkqCjIqUpZGpiKT+XXEhKajSptFzQtLJwNy2nck9NR01HkxpNrDTTJlvcSVxGxQ0lTQutbLAUyAUQEkQ5vz+6OF9vUVNCbuA8H9d1ruH+fN73ud/nDIOvOffn3LfNMAxDAAAAFlbO0Q0AAAA4GoEIAABYHoEIAABYHoEIAABYHoEIAABYHoEIAABYHoEIAABYHoEIAABYHoEIAABYHoEIgKlu3brq27evo9so82bMmKE777xTTk5Oatq0qaPbASACEVBmRUdHy2azad++fdecb9eunRo1avSXX2ft2rUaP378X96PVWzcuFGjR49Wq1attHjxYk2ZMuWmnvfUU0/JZrPplVdeuc0dAtbk7OgGAJQciYmJKlfu1v5/0tq1azV//nxC0U2KjY1VuXLltGjRIrm6ut7UczIyMvTVV1+pbt26+vjjjzVt2jTZbLbb3ClgLVwhAmByc3OTi4uLo9u4JVlZWY5u4ZakpqbKw8PjpsOQJP3nP//R5cuX9f777+vEiRPatm3bbewQsCYCEQDT1WuIcnNzNWHCBNWvX1/u7u6qWrWqWrdurZiYGElS3759NX/+fEmSzWYzt3xZWVkaOXKkateuLTc3NzVo0ED//Oc/ZRiG3eteuHBBQ4YMUbVq1VSpUiX9/e9/16+//iqbzWZ35Wn8+PGy2Ww6cuSInnnmGVWuXFmtW7eWJB08eFB9+/bVnXfeKXd3d/n6+urZZ5/VmTNn7F4rfx9Hjx5Vr1695OXlpTvuuENvvPGGDMPQiRMn1KVLF3l6esrX11czZ868qXN36dIlTZo0SXfddZfc3NxUt25d/eMf/1BOTo5ZY7PZtHjxYmVlZZnnKjo6+k/3vXTpUj300ENq3769AgICtHTp0mvWHTx4UA8++KA8PDxUq1YtTZ48WYsXL5bNZtPPP/9sV7tu3Tq1adNGFSpUUKVKlRQWFqbDhw/b1SQnJ6tfv36qVauW3NzcVKNGDXXp0qXAvoCygLfMgDIuPT1dp0+fLjCem5v7p88dP368pk6dqgEDBuiBBx5QRkaG9u3bp/379+uhhx7SCy+8oJMnTyomJkYffvih3XMNw9Df//53bd68Wf3791fTpk21YcMGjRo1Sr/++qtmz55t1vbt21crVqxQ79691bJlS23dulVhYWHX7evJJ59U/fr1NWXKFDNcxcTE6KefflK/fv3k6+urw4cP67333tPhw4e1a9euAm8xde/eXQEBAZo2bZrWrFmjyZMnq0qVKnr33XfVoUMHvfXWW1q6dKlefvll3X///Wrbtu0Nz9WAAQO0ZMkSPfHEExo5cqR2796tqVOn6rvvvtOqVaskSR9++KHee+897dmzRwsXLpQk/b//9/9uuN+TJ09q8+bNWrJkiSTp6aef1uzZszVv3jy7q0y//vqr2rdvL5vNpjFjxqhChQpauHCh3NzcCuzzww8/VEREhEJDQ/XWW2/p999/14IFC9S6dWsdOHBAdevWlSSFh4fr8OHDGjx4sOrWravU1FTFxMQoKSnJrAHKDANAmbR48WJD0g23e+65x+45fn5+RkREhPm4SZMmRlhY2A1fJzIy0rjWn5LPP//ckGRMnjzZbvyJJ54wbDab8cMPPxiGYRjx8fGGJGPYsGF2dX379jUkGePGjTPHxo0bZ0gynn766QKv9/vvvxcY+/jjjw1JxrZt2wrs4/nnnzfHLl26ZNSqVcuw2WzGtGnTzPFz584ZHh4edufkWhISEgxJxoABA+zGX375ZUOSERsba45FREQYFSpUuOH+rvTPf/7T8P
DwMDIyMgzDMIyjR48akoxVq1bZ1Q0ePNiw2WzGgQMHzLEzZ84YVapUMSQZx48fNwzDMM6fP294e3sbzz33nN3zk5OTDS8vL3P83LlzhiRjxowZN90rUJrxlhlQxs2fP18xMTEFtsaNG//pc729vXX48GEdO3bsll937dq1cnJy0pAhQ+zGR44cKcMwtG7dOknS+vXrJUkvvfSSXd3gwYOvu++BAwcWGPPw8DB/zs7O1unTp9WyZUtJ0v79+wvUDxgwwPzZyclJzZs3l2EY6t+/vznu7e2tBg0a6KeffrpuL9IfxypJI0aMsBsfOXKkJGnNmjU3fP6NLF26VGFhYapUqZIkqX79+goKCirwttn69esVHBxsdxt/lSpV1LNnT7u6mJgYpaWl6emnn9bp06fNzcnJSS1atNDmzZslyVzntGXLFp07d67Q/QOlBW+ZAWXcAw88oObNmxcYr1y58jXfSrvSxIkT1aVLF919991q1KiRHn74YfXu3fumwtT//vc/1axZ0/yHPF9AQIA5n/+f5cqVk7+/v11dvXr1rrvvq2sl6ezZs5owYYKWL1+u1NRUu7n09PQC9XXq1LF77OXlJXd3d1WrVq3A+NXrkK6WfwxX9+zr6ytvb2/zWG/Vd999pwMHDqhPnz764YcfzPF27dpp/vz5ysjIkKenp9lDcHBwgX1c3VN+uO3QocM1XzN/f25ubnrrrbc0cuRI+fj4qGXLlnr00UfVp08f+fr6Fup4gJKMQATgutq2basff/xRX3zxhTZu3KiFCxdq9uzZioqKsrvCUtyuvBqU76mnntLOnTs1atQoNW3aVBUrVlReXp4efvhh5eXlFah3cnK6qTFJBRaBX09R3wr/0UcfSZKGDx+u4cOHF5j/z3/+o379+t3SPvPPxYcffnjNYOPs/H//LAwbNkyPPfaYPv/8c23YsEFvvPGGpk6dqtjYWN1333239LpASUcgAnBDVapUUb9+/dSvXz9lZmaqbdu2Gj9+vBmIrhcC/Pz89PXXX+v8+fN2V4m+//57cz7/P/Py8nT8+HHVr1/frLvyisifOXfunDZt2qQJEyZo7Nix5nhh3uorjPxjOHbsmHkFTJJSUlKUlpZmHuutMAxDy5YtU/v27Qu8nShJkyZN0tKlS81A5Ofnd81zdvXYXXfdJUmqXr26QkJC/rSPu+66SyNHjtTIkSN17NgxNW3aVDNnzjTDGlBWsIYIwHVd/VZRxYoVVa9ePbtbyStUqCBJSktLs6vt3LmzLl++rHnz5tmNz549WzabTY888ogkKTQ0VJL0zjvv2NX961//uuk+86/sXH0l5+23377pffwVnTt3vubrzZo1S5JueMfc9ezYsUM///yz+vXrpyeeeKLA1r17d23evFknT56U9Md5jIuLU0JCgrmPs2fPFlhrFBoaKk9PT02ZMuWadxr+9ttvkqTff/9d2dnZdnN33XWXKlWqZPffP1BWcIUIwHUFBgaqXbt2CgoKUpUqVbRv3z59+umnGjRokFkTFBQkSRoyZIhCQ0Pl5OSkHj166LHHHlP79u312muv6eeff1aTJk20ceNGffHFFxo2bJh5pSIoKEjh4eF6++23debMGfO2+6NHj0q6ubehPD091bZtW02fPl25ubn629/+po0bN+r48eO34awU1KRJE0VEROi9995TWlqaHnzwQe3Zs0dLlixR165d1b59+1ve59KlS+Xk5HTdMPX3v/9dr732mpYvX64RI0Zo9OjR+uijj/TQQw9p8ODB5m33derU0dmzZ83z6OnpqQULFqh3795q1qyZevTooTvuuENJSUlas2aNWrVqpXnz5uno0aPq2LGjnnrqKQUGBsrZ2VmrVq1SSkqKevTo8ZfOF1AiOfQeNwC3Tf5t93v37r3m/IMPPvint91PnjzZeOCBBwxvb2/Dw8PDaNiwofHmm28aFy9eNGsuXbpkDB482LjjjjsMm81mdwv++fPnjeHDhxs1a9Y0XFxcjP
r16xszZsww8vLy7F43KyvLiIyMNKpUqWJUrFjR6Nq1q5GYmGhIsrsNPv+W+d9++63A8fzyyy/G448/bnh7exteXl7Gk08+aZw8efK6t+5fvY/r3Q5/rfN0Lbm5ucaECRMMf39/w8XFxahdu7YxZswYIzs7+6Ze50oXL140qlatarRp0+aGdf7+/sZ9991nPj5w4IDRpk0bw83NzahVq5YxdepUY+7cuYYkIzk52e65mzdvNkJDQw0vLy/D3d3duOuuu4y+ffsa+/btMwzDME6fPm1ERkYaDRs2NCpUqGB4eXkZLVq0MFasWPGn5wIojWyGcZOrBQGgGCUkJOi+++7TRx99VODWcdy8YcOG6d1331VmZuZ1F40DYA0RgBLgwoULBcbefvttlStX7k8/IRr/5+rzeObMGX344Ydq3bo1YQj4E6whAuBw06dPV3x8vNq3by9nZ2etW7dO69at0/PPP6/atWs7ur1SIzg4WO3atVNAQIBSUlK0aNEiZWRk6I033nB0a0CJx1tmABwuJiZGEyZM0JEjR5SZmak6deqod+/eeu211+w+Fwc39o9//EOffvqpfvnlF9lsNjVr1kzjxo27qdvrAasjEAEAAMtjDREAALA8AhEAALA83py/CXl5eTp58qQqVapU5N9VBAAAbg/DMHT+/HnVrFlT5crd+BoQgegmnDx5kjtdAAAopU6cOKFatWrdsIZAdBPyv5jyxIkT8vT0dHA3AADgZmRkZKh27dp2XzB9PQSim3DldwARiAAAKF1uZrkLi6oBAIDlEYgAAIDlEYgAAIDlEYgAAIDlEYgAAIDlEYgAAIDlEYgAAIDlEYgAAIDlEYgAAIDlEYgAAIDlEYgAAIDlEYgAAIDlOTwQ/frrr+rVq5eqVq0qDw8P3Xvvvdq3b585bxiGxo4dqxo1asjDw0MhISE6duyY3T7Onj2rnj17ytPTU97e3urfv78yMzPtag4ePKg2bdrI3d1dtWvX1vTp04vl+AAAQMnn0EB07tw5tWrVSi4uLlq3bp2OHDmimTNnqnLlymbN9OnTNXfuXEVFRWn37t2qUKGCQkNDlZ2dbdb07NlThw8fVkxMjFavXq1t27bp+eefN+czMjLUqVMn+fn5KT4+XjNmzND48eP13nvvFevxAgCAkslmGIbhqBd/9dVXtWPHDv33v/+95rxhGKpZs6ZGjhypl19+WZKUnp4uHx8fRUdHq0ePHvruu+8UGBiovXv3qnnz5pKk9evXq3Pnzvrll19Us2ZNLViwQK+99pqSk5Pl6upqvvbnn3+u77///k/7zMjIkJeXl9LT0+Xp6VlERw8AwO2RlJSk06dPO7qNW1KtWjXVqVOnSPd5K/9+OxfpK9+iL7/8UqGhoXryySe1detW/e1vf9NLL72k5557TpJ0/PhxJScnKyQkxHyOl5eXWrRoobi4OPXo0UNxcXHy9vY2w5AkhYSEqFy5ctq9e7cef/xxxcXFqW3btmYYkqTQ0FC99dZbOnfunN0VKQAASrOkpCQ1aBig7Au/O7qVW+LuUV6J339X5KHoZjk0EP30009asGCBRowYoX/84x/au3evhgwZIldXV0VERCg5OVmS5OPjY/c8Hx8fcy45OVnVq1e3m3d2dlaVKlXsavz9/QvsI3/u6kCUk5OjnJwc83FGRkYRHC0AALff6dOnlX3hd1V9dKRcqtZ2dDs3JffMCZ1ZPVOnT5+2ZiDKy8tT8+bNNWXKFEnSfffdp2+//VZRUVGKiIhwWF9Tp07VhAkTHPb6AAD8VS5Va8vNt56j2yg1HLqoukaNGgoMDLQbCwgIUFJSkiTJ19dXkpSSkmJXk5KSYs75+voqNTXVbv7SpUs6e/asXc219nHla1xpzJgxSk9PN7cTJ04U9hABAEAp4NBA1KpVKyUmJtqNHT16VH5+fpIkf39/+fr6atOmTeZ8RkaGdu/ereDgYElScHCw0tLSFB8fb9bExsYqLy9PLVq0MGu2bdum3N
xcsyYmJkYNGjS45vohNzc3eXp62m0AAKDscmggGj58uHbt2qUpU6bohx9+0LJly/Tee+8pMjJSkmSz2TRs2DBNnjxZX375pQ4dOqQ+ffqoZs2a6tq1q6Q/rig9/PDDeu6557Rnzx7t2LFDgwYNUo8ePVSzZk1J0jPPPCNXV1f1799fhw8f1ieffKI5c+ZoxIgRjjp0AABQgjh0DdH999+vVatWacyYMZo4caL8/f319ttvq2fPnmbN6NGjlZWVpeeff15paWlq3bq11q9fL3d3d7Nm6dKlGjRokDp27Khy5copPDxcc+fONee9vLy0ceNGRUZGKigoSNWqVdPYsWPtPqsIAABYl0M/h6i04HOIAAClxf79+xUUFCTfiLdLzaLqnOQflLxkmOLj49WsWbMi2++t/Pvt8K/uAAAAcDQCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDwCEQAAsDyHBqLx48fLZrPZbQ0bNjTns7OzFRkZqapVq6pixYoKDw9XSkqK3T6SkpIUFham8uXLq3r16ho1apQuXbpkV7NlyxY1a9ZMbm5uqlevnqKjo4vj8AAAQCnh8CtE99xzj06dOmVu27dvN+eGDx+ur776SitXrtTWrVt18uRJdevWzZy/fPmywsLCdPHiRe3cuVNLlixRdHS0xo4da9YcP35cYWFhat++vRISEjRs2DANGDBAGzZsKNbjBAAAJZezwxtwdpavr2+B8fT0dC1atEjLli1Thw4dJEmLFy9WQECAdu3apZYtW2rjxo06cuSIvv76a/n4+Khp06aaNGmSXnnlFY0fP16urq6KioqSv7+/Zs6cKUkKCAjQ9u3bNXv2bIWGhhbrsQIAgJLJ4VeIjh07ppo1a+rOO+9Uz549lZSUJEmKj49Xbm6uQkJCzNqGDRuqTp06iouLkyTFxcXp3nvvlY+Pj1kTGhqqjIwMHT582Ky5ch/5Nfn7uJacnBxlZGTYbQAAoOxy6BWiFi1aKDo6Wg0aNNCpU6c0YcIEtWnTRt9++62Sk5Pl6uoqb29vu+f4+PgoOTlZkpScnGwXhvLn8+duVJORkaELFy7Iw8OjQF9Tp07VhAkTiuowy6SkpCSdPn3a0W3ckmrVqqlOnTqObgMAUAI5NBA98sgj5s+NGzdWixYt5OfnpxUrVlwzqBSXMWPGaMSIEebjjIwM1a5d22H9lDRJSUlq0DBA2Rd+d3Qrt8Tdo7wSv/+OUAQAKMDha4iu5O3trbvvvls//PCDHnroIV28eFFpaWl2V4lSUlLMNUe+vr7as2eP3T7y70K7subqO9NSUlLk6el53dDl5uYmNze3ojqsMuf06dPKvvC7qj46Ui5VS0dQzD1zQmdWz9Tp06dLVSDiSlzx4DwDKFGBKDMzUz/++KN69+6toKAgubi4aNOmTQoPD5ckJSYmKikpScHBwZKk4OBgvfnmm0pNTVX16tUlSTExMfL09FRgYKBZs3btWrvXiYmJMfeBwnOpWltuvvUc3UaZVVqvxLm5ues///lUNWrUcHQrN+XUqVMKf+JJ5WRfcHQrt4QrnkDRcmggevnll/XYY4/Jz89PJ0+e1Lhx4+Tk5KSnn35aXl5e6t+/v0aMGKEqVarI09NTgwcPVnBwsFq2bClJ6tSpkwIDA9W7d29Nnz5dycnJev
311xUZGWle4Rk4cKDmzZun0aNH69lnn1VsbKxWrFihNWvWOPLQgT9VGq/EZf9yWGmxC/Xoo486upVbVprOc2m94gmUZA4NRL/88ouefvppnTlzRnfccYdat26tXbt26Y477pAkzZ49W+XKlVN4eLhycnIUGhqqd955x3y+k5OTVq9erRdffFHBwcGqUKGCIiIiNHHiRLPG399fa9as0fDhwzVnzhzVqlVLCxcu5JZ7lBql6Upc7pkTkmGUqnBx4ad9Sv/vR6XqPAMoeg4NRMuXL7/hvLu7u+bPn6/58+dft8bPz6/AW2JXa9eunQ4cOFCoHotDaVu/8N133zm6BZRwpSlc5J454egWLKW0/b2TWK9lFSVqDZEVldZ1IgBwq0rr3zvWa1kDgcjBSuM6kfy3GADgVpTGv3es17IOAlEJwVsMAKyiNP29g3UQiAAA+BOlae1kaeq1JCEQwVJK0x+K0tQrHKO0/Y6Utn4l6XLmOclmU69evRzdCm4zAhEsgT9qKEv4fS4+eTmZpfajJHBrCESwBP6ooSwpjb/PUun+nS5N655Y51k4BCJYCn/UUJaUpt9nid9plGzlHN0AAACAoxGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5RGIAACA5ZWYQDRt2jTZbDYNGzbMHMvOzlZkZKSqVq2qihUrKjw8XCkpKXbPS0pKUlhYmMqXL6/q1atr1KhRunTpkl3Nli1b1KxZM7m5ualevXqKjo4uhiMCAAClRYkIRHv37tW7776rxo0b240PHz5cX331lVauXKmtW7fq5MmT6tatmzl/+fJlhYWF6eLFi9q5c6eWLFmi6OhojR071qw5fvy4wsLC1L59eyUkJGjYsGEaMGCANmzYUGzHBwAASjaHB6LMzEz17NlT//73v1W5cmVzPD09XYsWLdKsWbPUoUMHBQUFafHixdq5c6d27dolSdq4caOOHDmijz76SE2bNtUjjzyiSZMmaf78+bp48aIkKSoqSv7+/po5c6YCAgI0aNAgPfHEE5o9e7ZDjhcAAJQ8Dg9EkZGRCgsLU0hIiN14fHy8cnNz7cYbNmyoOnXqKC4uTpIUFxene++9Vz4+PmZNaGioMjIydPjwYbPm6n2Hhoaa+7iWnJwcZWRk2G0AAKDscnbkiy9fvlz79+/X3r17C8wlJyfL1dVV3t7eduM+Pj5KTk42a64MQ/nz+XM3qsnIyNCFCxfk4eFR4LWnTp2qCRMmFPq4AABA6eKwK0QnTpzQ0KFDtXTpUrm7uzuqjWsaM2aM0tPTze3EiROObgkAANxGDgtE8fHxSk1NVbNmzeTs7CxnZ2dt3bpVc+fOlbOzs3x8fHTx4kWlpaXZPS8lJUW+vr6SJF9f3wJ3neU//rMaT0/Pa14dkiQ3Nzd5enrabQAAoOxyWCDq2LGjDh06pISEBHNr3ry5evbsaf7s4uKiTZs2mc9JTExUUlKSgoODJUnBwcE6dOiQUlNTzZqYmBh5enoqMDDQrLlyH/k1+fsAAABw2BqiSpUqqVGjRnZjFSpUUNWqVc3x/v37a8SIEapSpYo8PT01ePBgBQcHq2XLlpKkTp06KTAwUL1799b06dOVnJys119/XZGRkXJzc5MkDRw4UPPmzdPo0aP17LPPKjY2VitWrNCaNWuK94ABAECJ5dBF1X9m9uzZKleunMLDw5WTk6PQ0FC988475ryTk5NWr16tF198UcHBwapQoYIiIiI0ce
JEs8bf319r1qzR8OHDNWfOHNWqVUsLFy5UaGioIw4JAACUQCUqEG3ZssXusbu7u+bPn6/58+df9zl+fn5au3btDffbrl07HThwoChaBAAAZZDDP4cIAADA0QhEAADA8ghEAADA8ghEAADA8ghEAADA8ghEAADA8ghEAADA8ghEAADA8ghEAADA8goViH766aei7gMAAMBhChWI6tWrp/bt2+ujjz5SdnZ2UfcEAABQrAoViPbv36/GjRtrxIgR8vX11QsvvKA9e/YUdW8AAADFolCBqGnTppozZ45Onjyp999/X6dOnVLr1q3VqFEjzZo1S7/99ltR9wkAAHDb/KVF1c7OzurWrZtWrlypt956Sz/88INefvll1a5dW3369NGpU6eKqk8AAIDb5i8Fon379umll15SjRo1NGvWLL388sv68ccfFRMTo5MnT6pLly5F1ScAAMBt41yYJ82aNUuLFy9WYmKiOnfurA8++ECdO3dWuXJ/5Ct/f39FR0erbt26RdkrAADAbVGoQLRgwQI9++yz6tu3r2rUqHHNmurVq2vRokV/qTkAAIDiUKhAdOzYsT+tcXV1VURERGF2DwAAUKwKtYZo8eLFWrlyZYHxlStXasmSJX+5KQAAgOJUqEA0depUVatWrcB49erVNWXKlL/cFAAAQHEqVCBKSkqSv79/gXE/Pz8lJSX95aYAAACKU6ECUfXq1XXw4MEC4998842qVq36l5sCAAAoToUKRE8//bSGDBmizZs36/Lly7p8+bJiY2M1dOhQ9ejRo6h7BAAAuK0KdZfZpEmT9PPPP6tjx45ydv5jF3l5eerTpw9riAAAQKlTqEDk6uqqTz75RJMmTdI333wjDw8P3XvvvfLz8yvq/gAAAG67QgWifHfffbfuvvvuouoFAADAIQoViC5fvqzo6Ght2rRJqampysvLs5uPjY0tkuYAAACKQ6EC0dChQxUdHa2wsDA1atRINputqPsCAAAoNoUKRMuXL9eKFSvUuXPnou4HAACg2BXqtntXV1fVq1evqHsBAABwiEIFopEjR2rOnDkyDKOo+wEAACh2hXrLbPv27dq8ebPWrVune+65Ry4uLnbzn332WZE0BwAAUBwKFYi8vb31+OOPF3UvAAAADlGoQLR48eKi7gMAAMBhCrWGSJIuXbqkr7/+Wu+++67Onz8vSTp58qQyMzOLrDkAAIDiUKgrRP/73//08MMPKykpSTk5OXrooYdUqVIlvfXWW8rJyVFUVFRR9wkAAHDbFOoK0dChQ9W8eXOdO3dOHh4e5vjjjz+uTZs2FVlzAAAAxaFQV4j++9//aufOnXJ1dbUbr1u3rn799dciaQwAAKC4FOoKUV5eni5fvlxg/JdfflGlSpX+clMAAADFqVCBqFOnTnr77bfNxzabTZmZmRo3bhxf5wEAAEqdQr1lNnPmTIWGhiowMFDZ2dl65plndOzYMVWrVk0ff/xxUfcIAABwWxUqENWqVUvffPONli9froMHDyozM1P9+/dXz5497RZZAwAAlAaFCkSS5OzsrF69ehVlLwAAAA5RqED0wQcf3HC+T58+hWoGAADAEQoViIYOHWr3ODc3V7///rtcXV1Vvnx5AhEAAChVCnWX2blz5+y2zMxMJSYmqnXr1iyqBgAApU6hv8vsavXr19e0adMKXD0CAAAo6YosEEl/LLQ+efLkTdcvWLBAjRs3lqenpzw9PRUcHKx169aZ89nZ2YqMjFTVqlVVsWJFhYeHKyUlxW4fSUlJCgsLU/ny5VW9enWNGjVKly5dsqvZsmWLmjVrJjc3N9WrV0/R0dF/6TgBAEDZUqg1RF9++aXdY8MwdOrUKc2bN0+tWrW66f3UqlVL06ZNU/369WUYhpYsWaIuXbrowIEDuueeezR8+HCtWbNGK1eulJeXlwYNGqRu3bppx44dkqTLly8rLCxMvr6+2rlzp06dOqU+ffrIxcVFU6ZMkSQdP35cYWFhGjhwoJYuXa
pNmzZpwIABqlGjhkJDQwtz+AAAoIwpVCDq2rWr3WObzaY77rhDHTp00MyZM296P4899pjd4zfffFMLFizQrl27VKtWLS1atEjLli1Thw4dJEmLFy9WQECAdu3apZYtW2rjxo06cuSIvv76a/n4+Khp06aaNGmSXnnlFY0fP16urq6KioqSv7+/2VdAQIC2b9+u2bNnE4gAAICkv/BdZlduly9fVnJyspYtW6YaNWoUqpHLly9r+fLlysrKUnBwsOLj45Wbm6uQkBCzpmHDhqpTp47i4uIkSXFxcbr33nvl4+Nj1oSGhiojI0OHDx82a67cR35N/j4AAAAK/cGMReXQoUMKDg5Wdna2KlasqFWrVikwMFAJCQlydXWVt7e3Xb2Pj4+Sk5MlScnJyXZhKH8+f+5GNRkZGbpw4cI1P1k7JydHOTk55uOMjIy/fJwAAKDkKlQgGjFixE3Xzpo164bzDRo0UEJCgtLT0/Xpp58qIiJCW7duLUxbRWbq1KmaMGGCQ3sAAADFp1CB6MCBAzpw4IByc3PVoEEDSdLRo0fl5OSkZs2amXU2m+1P9+Xq6qp69epJkoKCgrR3717NmTNH3bt318WLF5WWlmZ3lSglJUW+vr6SJF9fX+3Zs8duf/l3oV1Zc/WdaSkpKfL09Lzu966NGTPGLvRlZGSodu3af3osAACgdCrUGqLHHntMbdu21S+//KL9+/dr//79OnHihNq3b69HH31Umzdv1ubNmxUbG3vL+87Ly1NOTo6CgoLk4uKiTZs2mXOJiYlKSkpScHCwJCk4OFiHDh1SamqqWRMTEyNPT08FBgaaNVfuI78mfx/X4ubmZn4UQP4GAADKrkJdIZo5c6Y2btyoypUrm2OVK1fW5MmT1alTJ40cOfKm9jNmzBg98sgjqlOnjs6fP69ly5Zpy5Yt2rBhg7y8vNS/f3+NGDFCVapUkaenpwYPHqzg4GC1bNlSktSpUycFBgaqd+/emj59upKTk/X6668rMjJSbm5ukqSBAwdq3rx5Gj16tJ599lnFxsZqxYoVWrNmTWEOHQAAlEGFCkQZGRn67bffCoz/9ttvOn/+/E3vJzU1VX369NGpU6fk5eWlxo0ba8OGDXrooYckSbNnz1a5cuUUHh6unJwchYaG6p133jGf7+TkpNWrV+vFF19UcHCwKlSooIiICE2cONGs8ff315o1azR8+HDNmTNHtWrV0sKFC7nlHgAAmAoViB5//HH169dPM2fO1AMPPCBJ2r17t0aNGqVu3brd9H4WLVp0w3l3d3fNnz9f8+fPv26Nn5+f1q5de8P9tGvXTgcOHLjpvgAAgLUUKhBFRUXp5Zdf1jPPPKPc3Nw/duTsrP79+2vGjBlF2iAAAMDtVqhAVL58eb3zzjuaMWOGfvzxR0nSXXfdpQoVKhRpcwAAAMXhL32566lTp3Tq1CnVr19fFSpUkGEYRdUXAABAsSlUIDpz5ow6duyou+++W507d9apU6ckSf3797/pO8wAAABKikIFouHDh8vFxUVJSUkqX768Od69e3etX7++yJoDAAAoDoVaQ7Rx40Zt2LBBtWrVshuvX7++/ve//xVJYwAAAMWlUFeIsrKy7K4M5Tt79qz5gYgAAAClRaECUZs2bfTBBx+Yj202m/Ly8jR9+nS1b9++yJoDAAAoDoV6y2z69Onq2LGj9u3bp4sXL2r06NE6fPiwzp49qx07dhR1jwAAALdVoa4QNWrUSEePHlXr1q3VpUsXZWVlqVu3bjpw4IDuuuuuou4RAADgtrrlK0S5ubl6+OGHFRUVpddee+129AQAAFCsbvkKkYuLiw4ePHg7egEAAHCIQr1l1qtXrz/9YlYAAIDSolCLqi9duqT3339fX3/9tYKCggp8h9msWbOKpDkAAIDicEuB6KefflLdunX17bffqlmzZpKko0eP2tXYbLai6w4AAKAY3FIgql+/vk6dOqXNmzdL+uOrOubOnSsfH5/b0hwAAEBxuK
U1RFd/m/26deuUlZVVpA0BAAAUt0Itqs53dUACAAAojW4pENlstgJrhFgzBAAASrtbWkNkGIb69u1rfoFrdna2Bg4cWOAus88++6zoOgQAALjNbikQRURE2D3u1atXkTYDAADgCLcUiBYvXny7+gAAAHCYv7SoGgAAoCwgEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMsjEAEAAMtzaCCaOnWq7r//flWqVEnVq1dX165dlZiYaFeTnZ2tyMhIVa1aVRUrVlR4eLhSUlLsapKSkhQWFqby5curevXqGjVqlC5dumRXs2XLFjVr1kxubm6qV6+eoqOjb/fhAQCAUsKhgWjr1q2KjIzUrl27FBMTo9zcXHXq1ElZWVlmzfDhw/XVV19p5cqV2rp1q06ePKlu3bqZ85cvX1ZYWJguXryonTt3asmSJYqOjtbYsWPNmuPHjyssLEzt27dXQkKChg0bpgEDBmjDhg3FerwAAKBkcnbki69fv97ucXR0tKpXr674+Hi1bdtW6enpWrRokZYtW6YOHTpIkhYvXqyAgADt2rVLLVu21MaNG3XkyBF9/fXX8vHxUdOmTTVp0iS98sorGj9+vFxdXRUVFSV/f3/NnDlTkhQQEKDt27dr9uzZCg0NLfbjBgAAJUuJWkOUnp4uSapSpYokKT4+Xrm5uQoJCTFrGjZsqDp16iguLk6SFBcXp3vvvVc+Pj5mTWhoqDIyMnT48GGz5sp95Nfk7+NqOTk5ysjIsNsAAEDZVWICUV5enoYNG6ZWrVqpUaNGkqTk5GS5urrK29vbrtbHx0fJyclmzZVhKH8+f+5GNRkZGbpw4UKBXqZOnSovLy9zq127dpEcIwAAKJlKTCCKjIzUt99+q+XLlzu6FY0ZM0bp6enmduLECUe3BAAAbiOHriHKN2jQIK1evVrbtm1TrVq1zHFfX19dvHhRaWlpdleJUlJS5Ovra9bs2bPHbn/5d6FdWXP1nWkpKSny9PSUh4dHgX7c3Nzk5uZWJMcGAABKPodeITIMQ4MGDdKqVasUGxsrf39/u/mgoCC5uLho06ZN5lhiYqKSkpIUHBwsSQoODtahQ4eUmppq1sTExMjT01OBgYFmzZX7yK/J3wcAALA2h14hioyM1LJly/TFF1+oUqVK5pofLy8veXh4yMvLS/3799eIESNUpUoVeXp6avDgwQoODlbLli0lSZ06dVJgYKB69+6t6dOnKzk5Wa+//roiIyPNqzwDBw7UvHnzNHr0aD377LOKjY3VihUrtGbNGocdOwAAKDkceoVowYIFSk9PV7t27VSjRg1z++STT8ya2bNn69FHH1V4eLjatm0rX19fffbZZ+a8k5OTVq9eLScnJwUHB6tXr17q06ePJk6caNb4+/trzZo1iomJUZMmTTRz5kwtXLiQW+4BAIAkB18hMgzjT2vc3d01f/58zZ8//7o1fn5+Wrt27Q33065dOx04cOCWewQAAGVfibnLDAAAwFEIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRAAAwPIIRA
AAwPIIRAAAwPIIRAAAwPIcGoi2bdumxx57TDVr1pTNZtPnn39uN28YhsaOHasaNWrIw8NDISEhOnbsmF3N2bNn1bNnT3l6esrb21v9+/dXZmamXc3BgwfVpk0bubu7q3bt2po+ffrtPjQAAFCKODQQZWVlqUmTJpo/f/4156dPn665c+cqKipKu3fvVoUKFRQaGqrs7GyzpmfPnjp8+LBiYmK0evVqbdu2Tc8//7w5n5GRoU6dOsnPz0/x8fGaMWOGxo8fr/fee++2Hx8AACgdnB354o888ogeeeSRa84ZhqG3335br7/+urp06SJJ+uCDD+Tj46PPP/9cPXr00Hfffaf169dr7969at68uSTpX//6lzp37qx//vOfqlmzppYuXaqLFy/q/fffl6urq+655x4lJCRo1qxZdsEJAABYV4ldQ3T8+HElJycrJCTEHPPy8lKLFi0UFxcnSYqLi5O3t7cZhiQpJCRE5cqV0+7du82atm3bytXV1awJDQ1VYmKizp07d83XzsnJUUZGht0GAADKrhIbiJKTkyVJPj4+duM+Pj7mXHJysqpXr2437+zsrCpVqtjVXGsfV77G1aZOnSovLy9zq1279l8/IAAAUGKV2EDkSGPGjFF6erq5nThxwtEtAQCA26jEBiJfX19JUkpKit14SkqKOefr66vU1FS7+UuXLuns2bN2Ndfax5WvcTU3Nzd5enrabQAAoOwqsYHI399fvr6+2rRpkzmWkZGh3bt3Kzg4WJIUHBystLQ0xcfHmzWxsbHKy8tTixYtzJpt27YpNzfXrImJiVGDBg1UuXLlYjoaAABQkjk0EGVmZiohIUEJCQmS/lhInZCQoKSkJNlsNg0bNkyTJ0/Wl19+qUOHDqlPnz6qWbOmunbtKkkKCAjQww8/rOeee0579uzRjh07NGjQIPXo0UM1a9aUJD3zzDNydXVV//79dfjwYX3yySeaM2eORowY4aCjBgAAJY1Db7vft2+f2rdvbz7ODykRERGKjo7W6NGjlZWVpeeff15paWlq3bq11q9fL3d3d/M5S5cu1aBBg9SxY0eVK1dO4eHhmjt3rjnv5eWljRs3KjIyUkFBQapWrZrGjh3LLfcAAMDk0EDUrl07GYZx3XmbzaaJEydq4sSJ162pUqWKli1bdsPXady4sf773/8Wuk8AAFC2ldg1RAAAAMWFQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACyPQAQAACzPUoFo/vz5qlu3rtzd3dWiRQvt2bPH0S0BAIASwDKB6JNPPtGIESM0btw47d+/X02aNFFoaKhSU1Md3RoAAHAwywSiWbNm6bnnnlO/fv0UGBioqKgolS9fXu+//76jWwMAAA5miUB08eJFxcfHKyQkxBwrV66cQkJCFBcX58DOAABASeDs6AaKw+nTp3X58mX5+PjYjfv4+Oj7778vUJ+Tk6OcnBzzcXp6uiQpIyOjyHvLzMz84zWTf1Dexewi3//tkHvmhCR6vt3ouXjQc/EpjX3Tc/HIPfuLpD/+TSzKf2vz92UYxp8XGxbw66+/GpKMnTt32o2PGjXKeOCBBwrUjxs3zpDExsbGxsbGVga2EydO/GlWsMQVomrVqsnJyUkpKSl24ykpKfL19S1QP2bMGI0YMcJ8nJeXp7Nnz6pq1aqy2Wy3vV
9HysjIUO3atXXixAl5eno6up0yi/NcPDjPxYdzXTw4z7fGMAydP39eNWvW/NNaSwQiV1dXBQUFadOmTerataukP0LOpk2bNGjQoAL1bm5ucnNzsxvz9vYuhk5LDk9PT/7HVgw4z8WD81x8ONfFg/N887y8vG6qzhKBSJJGjBihiIgINW/eXA888IDefvttZWVlqV+/fo5uDQAAOJhlAlH37t3122+/aezYsUpOTlbTpk21fv36AgutAQCA9VgmEEnSoEGDrvkWGf6Pm5ubxo0bV+AtQxQtznPx4DwXH8518eA83z42w7iZe9EAAADKLkt8MCMAAMCNEIgAAIDlEYgAAIDlEYgAAIDlEYgsaOrUqbr//vtVqVIlVa9eXV27dlViYqJdTXZ2tiIjI1W1alVVrFhR4eHhBT7pG39uwYIFaty4sfkhasHBwVq3bp05z3kuetOmTZPNZtOwYcPMMc5z0Rg/frxsNpvd1rBhQ3Oe81x0fv31V/Xq1UtVq1aVh4eH7r33Xu3bt8+cNwxDY8eOVY0aNeTh4aGQkBAdO3bMgR2XfgQiC9q6dasiIyO1a9cuxcTEKDc3V506dVJWVpZZM3z4cH311VdauXKltm7dqpMnT6pbt24O7Lp0qlWrlqZNm6b4+Hjt27dPHTp0UJcuXXT48GFJnOeitnfvXr377rtq3Lix3Tjnuejcc889OnXqlLlt377dnOM8F41z586pVatWcnFx0bp163TkyBHNnDlTlStXNmumT5+uuXPnKioqSrt371aFChUUGhqq7OzS8WWuJVKRfHsqSrXU1FRDkrF161bDMAwjLS3NcHFxMVauXGnWfPfdd4YkIy4uzlFtlhmVK1c2Fi5cyHkuYufPnzfq169vxMTEGA8++KAxdOhQwzD4fS5K48aNM5o0aXLNOc5z0XnllVeM1q1bX3c+Ly/P8PX1NWbMmGGOpaWlGW5ubsbHH39cHC2WSVwhgtLT0yVJVapUkSTFx8crNzdXISEhZk3Dhg1Vp04dxcXFOaTHsuDy5ctavny5srKyFBwczHkuYpGRkQoLC7M7nxK/z0Xt2LFjqlmzpu6880717NlTSUlJkjjPRenLL79U8+bN9eSTT6p69eq677779O9//9ucP378uJKTk+3OtZeXl1q0aMG5/gsIRBaXl5enYcOGqVWrVmrUqJEkKTk5Wa6urgW+0NbHx0fJyckO6LJ0O3TokCpWrCg3NzcNHDhQq1atUmBgIOe5CC1fvlz79+/X1KlTC8xxnotOixYtFB0drfXr12vBggU6fvy42rRpo/Pnz3Oei9BPP/2kBQsWqH79+tqwYYNefPFFDRkyREuWLJEk83xe/dVTnOu/xlJf3YGCIiMj9e2339qtA0DRatCggRISEpSenq5PP/1UERER2rp1q6PbKjNOnDihoUOHKiYmRu7u7o5up0x75JFHzJ8bN26sFi1ayM/PTytWrJCHh4cDOytb8vLy1Lx5c02ZMkWSdN999+nbb79VVFSUIiIiHNxd2cUVIgsbNGiQVq9erc2bN6tWrVrmuK+vry5evKi0tDS7+pSUFPn6+hZzl6Wfq6ur6tWrp6CgIE2dOlVNmjTRnDlzOM9FJD4+XqmpqWrWrJmcnZ3l7OysrVu3au7cuXJ2dpaPjw/n+Tbx9vbW3XffrR9++IHf5yJUo0YNBQYG2o0FBASYb0/mn8+r7+DjXP81BCILMgxDgwYN0qpVqxQbGyt/f3+7+aCgILm4uGjTpk3mWGJiopKSkhQcHFzc7ZY5eXl5ysnJ4TwXkY4dO+rQoUNKSEgwt+bNm6tnz57mz5zn2yMzM1M//vijatSowe9zEWrVqlWBj0I5evSo/Pz8JEn+/v7y9fW1O9cZGRnavXs35/qvcPSqbhS/F1980fDy8jK2bNlinDp1ytx+//13s2bgwIFGnTp1jNjYWGPfvn1GcHCwERwc7MCuS6dXX33V2Lp1q3H8+HHj4MGDxquvvmrYbDZj48aNhm
Fwnm+XK+8yMwzOc1EZOXKksWXLFuP48ePGjh07jJCQEKNatWpGamqqYRic56KyZ88ew9nZ2XjzzTeNY8eOGUuXLjXKly9vfPTRR2bNtGnTDG9vb+OLL74wDh48aHTp0sXw9/c3Lly44MDOSzcCkQVJuua2ePFis+bChQvGSy+9ZFSuXNkoX7688fjjjxunTp1yXNOl1LPPPmv4+fkZrq6uxh133GF07NjRDEOGwXm+Xa4ORJznotG9e3ejRo0ahqurq/G3v/3N6N69u/HDDz+Y85znovPVV18ZjRo1Mtzc3IyGDRsa7733nt18Xl6e8cYbbxg+Pj6Gm5ub0bFjRyMxMdFB3ZYNNsMwDEdeoQIAAHA01hABAADLIxABAADLIxABAADLIxABAADLIxABAADLIxABAADLIxABAADLIxABAADLIxABKFPi4uLk5OSksLAwR7cCoBThk6oBlCkDBgxQxYoVtWjRIiUmJqpmzZqObglAKcAVIgBlRmZmpj755BO9+OKLCgsLU3R0tN38l19+qfr168vd3V3t27fXkiVLZLPZlJaWZtZs375dbdq0kYeHh2rXrq0hQ4YoKyvLnH/nnXfMffj4+OiJJ54opqMDcDsRiACUGStWrFDDhg3VoEED9erVS++//77yL4IfP35cTzzxhLp27apvvvlGL7zwgl577TW75//44496+OGHFR4eroMHD+qTTz7R9u3bNWjQIEnSvn37NGTIEE2cOFGJiYlav3692rZtW+zHCaDo8ZYZgDKjVatWeuqppzR06FBdunRJNWrU0MqVK9WuXTu9+uqrWrNmjQ4dOmTWv/7663rzzTd17tw5eXt7a8CAAXJyctK7775r1mzfvl0PPvigsrKytHbtWvXr10+//PKLKlWq5IhDBHCbcIUIQJmQmJioPXv26Omnn5YkOTs7q3v37lq0aJE5f//999s954EHHrB7/M033yg6OloVK1Y0t9DQUOXl5en48eN66KGH5OfnpzvvvFO9e/fW0qVL9fvvvxfPAQK4rZwd3QAAFIVFixbp0qVLdouoDcOQm5ub5s2bd1P7yMzM1AsvvKAhQ4YUmKtTp45cXV21f/9+bdmyRRs3btTYsWM1fvx47d27V97e3kV1KAAcgEAEoNS7dOmSPvjgA82cOVOdOnWym+vatas+/vhjNWjQQGvXrrWb27t3r93jZs2a6ciRI6pXr951X8vZ2VkhISEKCQnRuHHj5O3trdjYWHXr1q3oDghAsSMQASj1Vq9erXPnzql///7y8vKymwsPD9eiRYu0YsUKzZo1S6+88or69++vhIQE8y40m80mSXrllVfUsmVLDRo0SAMGDFCFChV05MgRxcTEaN68eVq9erV++ukntW3bVpUrV9batWuVl5enBg0aFPchAyhirCECUOotWrRIISEhBcKQ9Ecg2rdvn86fP69PP/1Un332mRo3bqwFCxaYd5m5ublJkho3bqytW7fq6NGjatOmje677z6NHTvWfBvO29tbn332mTp06KCAgABFRUXp448/1j333FN8BwvgtuAuMwCW9eabbyoqKkonTpxwdCsAHIy3zABYxjvvvKP7779fVatW1Y4dOzRjxgzzM4YAWBuBCIBlHDt2TJMnT9bZs2dVp04djRw5UmPGjHF0WwBKAN4yAwAAlseiagAAYHkEIgAAYHkEIgAAYHkEIgAAYHkEIgAAYHkEIgAAYHkEIgAAYHkEIgAAYHkEIgAAYHn/H6AQISu8j2vWAAAAAElFTkSuQmCC",
"text/plain": [
"
"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"%pip install matplotlib\n",
"\n",
"import matplotlib.pyplot as plt\n",
"\n",
"users_df = read_result[\"users\"].to_pandas()\n",
"\n",
"plt.hist(users_df[\"age\"], bins=10, edgecolor=\"black\")\n",
"plt.title(\"Histogram of Ages\")\n",
"plt.xlabel(\"Ages\")\n",
"plt.ylabel(\"Frequency\")\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "wSru-wGpHZrh"
},
"source": [
"## Working in SQL\n",
"\n",
"Since data is cached in a local DuckDB database, we can query the data with SQL.\n",
"\n",
"We can do this in multiple ways. One way is to use the [JupySQL Extension](https://jupysql.ploomber.io/en/latest/user-guide/template.html), which we'll use below.\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "xdotIOg70nuL",
"outputId": "b4ca91ec-0b36-42fd-d19e-16b8d1f7ae45"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/95.7 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m\u001b[90m━\u001b[0m \u001b[32m92.2/95.7 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m95.7/95.7 kB\u001b[0m \u001b[31m2.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m250.9/250.9 kB\u001b[0m \u001b[31m13.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m193.0/193.0 kB\u001b[0m \u001b[31m20.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.1/41.1 kB\u001b[0m \u001b[31m5.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDeploy Panel apps for free on Ploomber Cloud! Learn more: https://ploomber.io/s/signup\n"
]
}
],
"source": [
"# Install JupySQL to enable SQL cell magics\n",
"%pip install --quiet jupysql\n",
"# Load JupySQL extension\n",
"%load_ext sql\n",
"# Configure max row limit (optional)\n",
"%config SqlMagic.displaylimit = 200"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"id": "2tA6L1dHZ2w0"
},
"outputs": [],
"source": [
"# Get the SQLAlchemy 'engine' object for the cache\n",
"engine = read_result.cache.get_sql_engine()\n",
"# Pass the engine to JupySQL\n",
"%sql engine"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"id": "0eaYnErPaFsH",
"outputId": "3c017bfc-adf3-40bc-81af-e6b1478d229f"
},
"outputs": [
{
"data": {
"text/plain": [
"['main.users', 'main.purchases']"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Get table objects for the 'users' and 'purchases' streams\n",
"users_table = read_result.cache[\"users\"].to_sql_table()\n",
"purchases_table = read_result.cache[\"purchases\"].to_sql_table()\n",
"display([users_table.fullname, purchases_table.fullname])"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 271
},
"id": "VjeTOtKHHiA5",
"outputId": "a4bef98d-0572-4535-9b63-a37dba00723d"
},
"outputs": [
{
"data": {
"text/html": [
"Running query in 'duckdb:///.cache/default_cache_db.duckdb'"
],
"text/plain": [
"Running query in 'duckdb:///.cache/default_cache_db.duckdb'"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"
"
],
"text/plain": [
"+---------------------+\n",
"| Name |\n",
"+---------------------+\n",
"| products |\n",
"| purchases |\n",
"| users |\n",
"| _PyAirbyte_state |\n",
"| _PyAirbyte_streams |\n",
"+---------------------+"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Show tables for the other streams\n",
"%sqlcmd tables"
]
}
],
"metadata": {
"colab": {
"include_colab_link": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/PyAirbyte_CoinAPI_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "view-in-github"
},
"source": [
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "En5baAhvYE_y"
},
"source": [
"In this demo, we use PyAirbyte to extract cryptocurrency data from [CoinAPI.io](https://www.coinapi.io/), followed by a series of transformations and analyses to derive meaningful insights from this data.\n",
"\n",
"The only prerequisite is a CoinAPI [API key](https://www.coinapi.io/get-free-api-key?product_id=market-data-api).\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8awBDcLvRW2g"
},
"source": [
"### Installing PyAirbyte\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xrhNw5kK5Lvx"
},
"outputs": [],
"source": [
"# Add virtual environment support in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# Install PyAirbyte\n",
"%pip install --quiet airbyte"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "mYsTAS1wRgO_"
},
"source": [
"### Load source data from CoinAPI.io to local cache\n",
"\n",
"In this section, we establish a connection to CoinAPI.io to access cryptocurrency data via PyAirbyte. The connector is configured with necessary parameters like the API key, environment setting, symbol ID for the specific cryptocurrency index (in this case, `COINBASE_SPOT_INDEX_USD`), and the data period we are interested in. [Check the docs](https://docs.airbyte.com/integrations/sources/coin-api) for more details.\n",
"\n",
    "After configuring the source connector, we perform a check to ensure that the configuration is correct and the connection to the API is successful. Then, we proceed to read from the source into the internal DuckDB cache. The `read()` method retrieves all available streams from the source.\n",
"\n",
"Note: The credentials are retrieved securely using the `get_secret()` method. This will automatically locate a matching Google Colab secret or environment variable, ensuring they are not hard-coded into the notebook. Make sure to add your key to the Secrets section on the left.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "9em82J2Q5WzN"
},
"outputs": [],
"source": [
"import airbyte as ab\n",
"\n",
"# Create and configure the source connector:\n",
"source = ab.get_source(\n",
" \"source-coin-api\",\n",
" config={\n",
" \"api_key\": ab.get_secret(\"COIN_API_KEY\"),\n",
" \"environment\": \"production\",\n",
" \"symbol_id\": \"COINBASE_SPOT_INDEX_USD\",\n",
" \"period\": \"1DAY\",\n",
" \"start_date\": \"2023-01-01T00:00:00\",\n",
" },\n",
" streams=\"*\",\n",
")\n",
"\n",
"# Verify the config and creds by running `check`:\n",
"source.check()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "MxY9a8RlD-PY"
},
"outputs": [],
"source": [
"# Read data from the source into the default cache:\n",
"cache = ab.get_default_cache()\n",
"result = source.read(cache=cache)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ZKBR6MilSJta"
},
"source": [
"### Read data from the cache\n",
"\n",
"Read from the already-written DuckDB cache into a pandas Dataframe. After the data is in the cache, you can read it without re-configuring or re-creating the source object. You can also select a stream to read from.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "lFlveLjfGYof"
},
"outputs": [],
"source": [
"# Read from the cache into a pandas Dataframe:\n",
"ohlcv_df = cache[\"ohlcv_historical_data\"].to_pandas()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "IFBo4q26SVX_"
},
"source": [
"### Run data transformations\n",
"\n",
"In this section, we're transforming our data for analysis:\n",
"\n",
"- Convert `time_period_start` to datetime for easy handling of dates.\n",
"- Convert numeric columns to numeric types for calculations.\n",
"- Calculate `daily_movement` to analyze daily price changes in the market.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "vr37o4AgB_o6"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# Convert 'time_period_start' to datetime format and necessary columns to numeric\n",
"ohlcv_df[\"time_period_start\"] = pd.to_datetime(ohlcv_df[\"time_period_start\"])\n",
"numeric_columns = [\n",
" \"price_open\",\n",
" \"price_high\",\n",
" \"price_low\",\n",
" \"price_close\",\n",
" \"volume_traded\",\n",
" \"trades_count\",\n",
"]\n",
"ohlcv_df[numeric_columns] = ohlcv_df[numeric_columns].apply(\n",
" pd.to_numeric, errors=\"coerce\"\n",
")\n",
"\n",
"# Calculate daily price movement\n",
"ohlcv_df[\"daily_movement\"] = ohlcv_df[\"price_close\"] - ohlcv_df[\"price_open\"]"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "p7vX1_w4SYaF"
},
"source": [
"### Analyze the data\n",
"\n",
"Now, we delve into visual analysis:\n",
"\n",
"- We plot the `daily_movement` to observe day-to-day price changes.\n",
    "- A dual-axis plot is created to compare `price_close` with `volume_traded`, helping us see the relation between price and trading volume.\n",
"- We calculate and plot 7-day and 30-day moving averages alongside the closing price to identify trends and potential crossovers.\n",
"\n",
    "These visualizations help us understand market behaviors and trends in the data.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 607
},
"id": "VMRjR836LJxj",
"outputId": "3a63df58-fd14-4fd4-8802-7a922d36622a"
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAABKUAAAJOCAYAAABm7rQwAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOy9eXgc1ZX+/1bvaqnV2qzF8ibbeBGysQ3YmH23MHGAMAkJMUmYhEmckCHhm41sjifzS0ICGTKZDGQyySTBkBBm2BxAQFjCZjAgG1vI8ipblqy1W0tr6bXq90fVrV7US1V3VS/S+TwPD1Z3dVV19a1b9577nvdwgiAIIAiCIAiCIAiCIAiCIIgsYsj1CRAEQRAEQRAEQRAEQRCzDwpKEQRBEARBEARBEARBEFmHglIEQRAEQRAEQRAEQRBE1qGgFEEQBEEQBEEQBEEQBJF1KChFEARBEARBEARBEARBZB0KShEEQRAEQRAEQRAEQRBZh4JSBEEQBEEQBEEQBEEQRNahoBRBEARBEARBEARBEASRdSgoRRAEQRAEQRAEQRAEQWQdCkoRBEEQBFFw/P73vwfHcThx4oT82qWXXopLL700Z+cUyyuvvAKO4/DKK6/k+lQIgiAIgiDyEgpKEQRBEAShCyxwxP6z2WyYO3cuNm3ahH//93+Hx+PJ9SkmJfLcDQYD5s6di6uvvjrvg0zsnD/3uc/Fff873/mOvM3Q0FCWz64wePPNN/GDH/wAIyMjuT4VgiAIgpjRcIIgCLk+CYIgCIIgZh6///3vceutt+Jf/uVf0NDQgEAggL6+Przyyit44YUXsGDBAjz11FNYvXq16n2HQiEEAgFYrVZwHAcAskpKq6ARx3G46qqr8KlPfQqCIKCzsxP/+Z//iYGBATz99NO45pprkn6e53n4/X5YLBYYDNlbB2QBQJvNhv7+flgslqj3Fy9ejN7eXni9XgwODqKqqipr51Yo3HPPPfj617+Ozs5OLFq0KNenQxAEQRAzFlJKEQRBEAShK9dccw22bt2KW2+9FXfddReee+45/O1vf8PAwAA+/OEPY2pqSvU+jUYjbDabHJDSi2XLlmHr1q245ZZb8P3vfx8vvPACBEHAfffdl/AzXq8XPM/DYDDAZrNlNSDFaG5uxtjYGJ599tmo19988010dnbi2muvzfo5EQRBEARBxEJBKYIgCIIgss7ll1+O733vezh58iR27twpv75//3585jOfweLFi2Gz2VBbW4t//Md/hMvlivp8PE+pSMbHx1FcXIw77rhj2nvd3d0wGo348Y9/rPq8V61ahaqqKnR2dgII+0b9+c9/xne/+13U19fDbrdjbGwsoafU22+/jc2bN6O8vBzFxcVYvXo1fvGLX0Rt09HRgX/4h39ARUUFbDYbzjnnHDz11FOKz7O+vh4XX3wxHn744ajXH3roIaxatQpNTU1xP/foo4/i7LPPRlFREaqqqrB161b09PTI799zzz3gOA4nT56c9tm77roLFosFw8PDUd+1ubkZTqcTdrsdl1xyCd54442oz/3gBz8Ax3E4fPgwtm7dCqfTiTlz5uB73/seBEHAqVOncN1116G0tBS1tbW49957px3b5/Nh+/btWLp0KaxWK+bPn49vfOMb8Pl8UdtxHIfbb78dTzzxBJqammC1WnHmmWeipaUl6ny+/vWvAwAaGhrkVMdEbY0gCIIgiPShoBRBEARBEDnhlltuAQA8//zz8msvvPACjh8/jltvvRW//OUv8fGPfxx//vOfsXnzZqhxHCgpKcENN9yARx55BKFQKOq9P/3pTxAEAZ/85CdVn/Pw8DCGh4dRWVkZ9foPf/hDPP300/ja176GH/3oR9NS5iK/38UXX4z29nbccccduPfee3HZZZfhr3/9q7zNBx98gPPOOw8HDx7Et771Ldx7770oLi7G9ddfj8cff1zxud58883YtWsXxsfHAQDBYBCPPvoobr755r
jb//73v8fHPvYxOWB322234bHHHsOFF14oeyt97GMfA8dx+Mtf/jLt83/5y19w9dVXo7y8HADw0ksv4eKLL8bY2Bi2b9+OH/3oRxgZGcHll1+OPXv2TPv8TTfdBJ7n8ZOf/AQbNmzAv/7rv+K+++7DVVddhfr6etx9991YunQpvva1r+HVV1+VP8fzPD784Q/jnnvuwZYtW/DLX/4S119/Pf7t3/4NN91007TjvP766/jiF7+Ij3/84/jpT38Kr9eLG2+8UQ58fuQjH8EnPvEJAMC//du/4cEHH8SDDz6IOXPmKL72BEEQBEEoRCAIgiAIgtCB//mf/xEACO+8807CbZxOp7B27Vr578nJyWnb/OlPfxIACK+++uq0fXd2dsqvXXLJJcIll1wi//3cc88JAIRnn302an+rV6+O2i4RAITPfvazwuDgoDAwMCC8/fbbwhVXXCEAEO69915BEATh5ZdfFgAIixcvnnbu7L2XX35ZEARBCAaDQkNDg7Bw4UJheHg4alue5+V/X3HFFcKqVasEr9cb9f75558vnHHGGYrO+0tf+pLgdrsFi8UiPPjgg4IgCMLTTz8tcBwnnDhxQti+fbsAQBgcHBQEQRD8fr9QXV0tNDU1CVNTU/K+/vrXvwoAhO9///vyaxs3bhTOPvvsqGPu2bNHACD88Y9/lM/3jDPOEDZt2hT13SYnJ4WGhgbhqquukl9j5/JP//RP8mvBYFCYN2+ewHGc8JOf/ER+fXh4WCgqKhI+/elPy689+OCDgsFgEF577bWoc3rggQcEAMIbb7wRdW0sFotw9OhR+bX3339fACD88pe/lF/72c9+Nq19EQRBEAShPaSUIgiCIAgiZ5SUlERV4SsqKpL/7fV6MTQ0hPPOOw8A0NraqmrfV155JebOnYuHHnpIfq2trQ379+/H1q1bFe3jt7/9LebMmYPq6mps2LABb7zxBu6880585Stfidru05/+dNS5x2Pv3r3o7OzEV77yFZSVlUW9x7yx3G43XnrpJXzsYx+Dx+PB0NAQhoaG4HK5sGnTJhw5ciQqnS4Z5eXlaG5uxp/+9CcAwMMPP4zzzz8fCxcunLbtu+++i4GBAXzxi1+EzWaTX7/22muxYsUKPP300/JrN910E9577z0cO3ZMfu2RRx6B1WrFddddBwDYt28fjhw5gptvvhkul0v+HhMTE7jiiivw6quvguf5qHOIrBZoNBpxzjnnQBAEfPazn5VfLysrw/Lly3H8+HH5tUcffRQrV67EihUr5OMMDQ3h8ssvBwC8/PLLUce58sorsWTJEvnv1atXo7S0NGqfBEEQBEFkB1OuT4AgCIIgiNnL+Pg4qqur5b/dbjd27NiBP//5zxgYGIjadnR0VNW+DQYDPvnJT+L+++/H5OQk7HY7HnroIdhsNnz0ox9VtI/rrrsOt99+OziOg8PhwJlnnoni4uJp2zU0NKTcFwviJPJzAoCjR49CEAR873vfw/e+97242wwMDKC+vl7R+d9888245ZZb0NXVhSeeeAI//elP427HPKKWL18+7b0VK1bg9ddfl//+6Ec/ijvvvBOPPPIIvv3tb0MQBDz66KO45pprUFpaCgA4cuQIADFYl4jR0VE51Q8AFixYEPW+0+mEzWabVh3Q6XRGeYwdOXIEBw8eTJheF9uOYo8DiAG8SC8sgiAIgiCyAwWlCIIgCILICd3d3RgdHcXSpUvl1z72sY/hzTffxNe//nWsWbMGJSUl4Hkezc3N05Q1SvjUpz6Fn/3sZ3jiiSfwiU98Ag8//DA+9KEPwel0Kvr8vHnzcOWVV6bcLpVKSinsO37ta1/Dpk2b4m4Teb1S8eEPfxhWqxWf/vSn4fP58LGPfSzjc5w7dy4uuugi/OUvf8G3v/1tvPXWW+jq6sLdd98tb8O+x89+9jOsWbMm7n5KSkqi/jYajdO2ifcagCh/MZ7nsWrVKvz85z+Pu+38+fNV75MgCIIgiOxAQSmCIAiCIH
LCgw8+CABy8GV4eBgvvvgiduzYge9///vydkx1kw5NTU1Yu3YtHnroIcybNw9dXV345S9/mdmJpwlLGWtra0sY6Fq8eDEAwGw2KwqGpaKoqAjXX389du7ciWuuuWaa6ojBUvoOHTokp70xDh06NC3l76abbsIXv/hFHDp0CI888gjsdju2bNkiv8++a2lpqSbfIxlLlizB+++/jyuuuEJOg8wUrfZDEARBEERyyFOKIAiCIIis89JLL+GHP/whGhoa5Cp4TMESq1i57777MjrWLbfcgueffx733XcfKisrcc0112S0v3RZt24dGhoacN9998nV7BjsO1dXV+PSSy/Fr3/9a/T29k7bx+DgoOrjfu1rX8P27dsTpgMCwDnnnIPq6mo88MAD8Pl88uvPPvssDh48iGuvvTZq+xtvvBFGoxF/+tOf8Oijj+JDH/pQVFrj2WefjSVLluCee+6Rq/9l+j0S8bGPfQw9PT34zW9+M+29qakpTExMqN4n+y6xvxNBEARBENpCSimCIAiCIHTl2WefRUdHB4LBIPr7+/HSSy/hhRdewMKFC/HUU0/JxtqlpaW4+OKL8dOf/hSBQAD19fV4/vnn0dnZmdHxb775ZnzjG9/A448/jm3btsFsNmvxtVRjMBhw//33Y8uWLVizZg1uvfVW1NXVoaOjAx988AGee+45AMCvfvUrXHjhhVi1ahVuu+02LF68GP39/di9eze6u7vx/vvvqzruWWedhbPOOivpNmazGXfffTduvfVWXHLJJfjEJz6B/v5+/OIXv8CiRYvw1a9+NWr76upqXHbZZfj5z38Oj8eDm266adp3/e///m9cc801OPPMM3Hrrbeivr4ePT09ePnll1FaWopdu3ap+h6JuOWWW/CXv/wFX/jCF/Dyyy/jggsuQCgUQkdHB/7yl7/gueeewznnnKNqn2effTYA4Dvf+Q4+/vGPw2w2Y8uWLXH9xAiCIAiCSB8KShEEQRAEoSssFc9isaCiogKrVq3Cfffdh1tvvRUOhyNq24cffhhf/vKX8atf/QqCIODqq6/Gs88+i7lz56Z9/JqaGlx99dV45plncMstt2T0XTJl06ZNePnll7Fjxw7ce++94HkeS5YswW233SZv09jYiHfffRc7duzA73//e7hcLlRXV2Pt2rVRaY1a85nPfAZ2ux0/+clP8M1vfhPFxcW44YYbcPfdd0+rFgiIKXx/+9vf4HA4sHnz5mnvX3rppdi9ezd++MMf4j/+4z8wPj6O2tpabNiwAZ///Oc1O2+DwYAnnngC//Zv/4Y//vGPePzxx2G327F48WLccccdWLZsmep9nnvuufjhD3+IBx54AC0tLeB5Hp2dnRSUIgiCIAiN4QRydSQIgiAIYoZzww034MCBAzh69GiuT4UgCIIgCIKQIE8pgiAIgiBmNL29vXj66adzrpIiCIIgCIIgoqH0PYIgCIIgZiSdnZ1444038N///d8wm82apowRBEEQBEEQmUNKKYIgCIIgZiR///vfccstt6CzsxN/+MMfUFtbm+tTIgiCIAiCICIgTymCIAiCIAiCIAiCIAgi65BSiiAIgiAIgiAIgiAIgsg6FJQiCIIgCIIgCIIgCIIgsg4ZnaeA53mcPn0aDocDHMfl+nQIgiAIgiAIgiAIgiDyGkEQ4PF4MHfuXBgMifVQFJRKwenTpzF//vxcnwZBEARBEARBEARBEERBcerUKcybNy/h+xSUSoHD4QAgXsjS0tIcn036BAIBPP/887j66qthNptzfTpEAUFthygkqL0S6UJth0gHajdEOlC7IQoNarNEOoyNjWH+/PlyTCURFJRKAUvZKy0tLfiglN1uR2lpKXUkhCqo7RCFBLVXIl2o7RDpQO2GSAdqN0ShQW2WyIRUNkhkdE4QBEEQBEEQBEEQBEFkHQpKEQRBEARBEARBEARBEFmHglIEQRAEQRAEQRAEQRBE1qGgFEEQBEEQBEEQBEEQBJF1KChFEA
RBEARBEARBEARBZB0KShEEQRAEQRAEQRAEQRBZh4JSBEEQBEEQBEEQBEEQRNahoBRBEARBEARBEARBEASRdSgoRRAEQRAEQRAEQRAEQWQdCkoRBEEQBEEQBEEQBEEQWYeCUgRBEARBEARBEARBEETWoaAUQRAEQRAEQRAEQRAEkXUoKEUQBEEQBEEQBEEQBEFkHVOuT4AgCIIgCKIQCPEC9nS6MeDxotphw/qGChgN3Iw9LkEQBEEQhN5QUIogCIIgCCIFLW292LGrHb2jXvm1OqcN27c0ormpbsYdlyAIgiAIIhtQ+h5BEARBEEQSWtp6sW1na1RgCAD6Rr3YtrMVLW29M+q4BEEQBEEQ2YKCUgRBEARBEAkI8QJ27GqHEOc99tqOXe0I8fG2KLzjEgRBEARBZBMKShEEQRAEQSRgT6d7mlIpEgFA76gXezrdM+K4BEEQBEEQ2YSCUgRBEARBEAkY8CQODKWzXb4flyAIgiAIIptQUIogCIIgCCIB1Q6bptvl+3EJgiAIgiCyCQWlCIIgCIIgErC+oQJ1Thu4JNuU281Y31CR1eNyEKvwaX1cgiAIgiCIbEJBKYIgCIIgiAQYDRy2b2lMus3oVACPtXbrdtxEgantWxphNCQLlxEEQRAEQeQ3FJQiCIIgCIJIQnNTHe7fug52izHq9TqnDec1VIIXgK//737816vHAIiV83Yfc+HJfT3YfcyVdoU8dtya0ugUvYpiC+7fug7NTXXpfSGCIAiCIIg8wZTrEyAIgiAIgsh3mpvq8PT+Xuza34vr18zFTecuwPqGCnAAfvzsQfzmtU786JkOvHNiGAd6RtEXUTmvzmnD9i2NaQWRmpvqsGZ+Oc778Yvya5+/eDEFpAiCIAiCmBGQUoogCIIgCEIB7kk/AOCS5XOwcUkljAYOBgOH71zbiG9dswIA8EJ7f1RACgD6Rr3YtrMVLW29aR133BeI+vtQvyet/RAEQRAEQeQbFJQiCIIgCIJQgGtcDEpVFFunvXfbRYvhLDLH/RxL3tuxqz2tVL7hyeig1MFeCkoRBEEQBDEzoKAUQRAEQRCEAlwTYlCqstgy7b09nW6MTgWmvc4QAPSOerGn0636uCNSUKqqRDzu0QEPAiFe9X4IgiAIgiDyDQpKEQRBEARBpEAQBAyzoFTJ9KDUgMc77bV4KN0ukmEpbbBxrhMlVhMCIQHHBydU74cgCIIgCCLfoKAUQRAEQRBECsamgghKqXcVcZRS1Q7btNfioXS7SEakoFSF3YwVtQ4AwMHeMdX7IQiCIAiCyDcoKEUQBEEQBJGCoQkfAMBhNcFqMk57f31DBeqcNnAJPs9BrMK3vqFC9bFZ+l6Z3YKVdaUAgIN9FJQiCIIgCKLwoaAUQRAEQRBECtxJUvcAwGjgsH1LIwBMC0yxv7dvaYTRkChslZhhOShlxoo6ppQis3OCIAiCIAofCkoRBEEQBEGkwDUuKqXipe4xmpvqcP/Wdah1Rqfo1TptuH/rOjQ31aV17NEpMSBWHqGU6qD0PYIgCIIgZgAUlCIIgiAIgkiBXHmvxJp0u+amOrz+zctx+Yo5AICPnj0Pr3/z8rQDUgAwPBFWSi2vcYDjgAGPTw6UEQRBEARBFCoUlCIIgiAIgkiBa1wKSiVRSjGMBg4rakVFU7HVlFbKXiQjU2FPqWKrCQsr7ACAjj5K4SMIgiAIorChoBRBEARBEEQKmKdUsvS9SNh2w1LlvExg1ffKiswAIAe8qAIfQRAEQRCFDgWlCIIgCIIgUjAkpcqlSt9jlNvFoBQLZmUCC2yxfZLZOUEQBEEQMwUKShEEQRAEQaRArr6XZaWUNxCCN8ADAJx2USklm533kVKKIAiCIIjChoJSBEEQBEEQKZCDUiXKglLlLCglmZSny8ik+HmjgUOpzQQAWCml7x3pH0cgxGe0f4IgCIIgiFxCQSmCIAiCIIgUDI2r85Qql1RNmabvjUyF/aQ4TjRMn1dehBKrCf4Qj8
6hiYz2TxAEQRAEkUsoKEUQBEEQBJEEnhfkNLwqpZ5SUvBqKhDClD+U9rGZ0oql7gGAwcBheS3zlaIUPoIgCIIgChcKShEEQRAEQSRhdCqAEC8ACJuNp8JhNcFkEJVNmfhKjU5Fm5wzVpLZOUEQBEEQMwAKShEEQRAEQSTBJaXgOWwmWEzKhk4cx8lqqUxS+IYlT6myInPU6yskXylSShEEQRAEUchQUIogCIIgCCIJrnEfAOWpe4wKe+YV+NhnyxIopagCH0EQBEEQhQwFpQiCIAiCIJLAlE5KTc4Z5cWiuompndJhVPpsuT1aKbVcUkr1j/kyNlMnCIIgCILIFRSUIgiCIAiCSMKQFPSpVBmUYkGs4YzS95hSKjooVWI1YUGFHQDQQSl8BEEQBEEUKBSUIgiCIAiCSIJ7XApKlahUStkz95QaYZ5ScQzWZbPzPjI7JwiCIAiiMKGgFEEQBEEQRBLcE6KnVGWxSk+p4sw9pcJBKfO098jsnCAIgiCIQoeCUgRBEARBEEkYStdTSgul1JQ/al+RrKwTg1Jkdk4QBEEQRKFCQSmCIAiCIIgkpJu+p4VSipmkO4umK6VY+t7h/nEEQ3zaxyAIgiAIgsgVFJQiCIIgCIJIgivN9L3yYqaUSq/6niAIGJECWuVxVFrzy+0othjhD/LoHJpI6xgEQRAEQRC5hIJSBEEQBEEQSXCnnb4nqpvSrb436Q8hEBKi9hWJwcBhea2olmonXymCIAiCIAoQCkoRBEEQBEEkgOcFOShVlW71vUk/BEFQfWyW9mcxGlBkNsbdZoXsK0UV+AiCIAiCKDwoKEUQBEEQBJGAkakAeCmeFC+FLhlMWeUP8pj0h9QfO6LyHsdxcbdhZudUgY8gCIIgiEKEglIEQRAEQRAJcI2LflLOIjPMRnXDJrvFCItJ/Ew6Ffgig1KJWCml73X0klKKIAiCIIjCg4JSBEEQBEEQCXBJwaRKlSopAOA4DhX29Cvwsc+U2RMfm3lK9Y150/auIgiCIAiCyBUUlCIIgiAIgkgAUzhVqvSTYrCUv+FJ9RX4RqbEz8QzOWc4bGbMrygCABzsoxQ+giAIgiAKCwpKEQRBEARBJICl76mtvMeoKE6/At+I9JmyouTHXlkrmZ1TCh9BEARBEAUGBaUIgiAIgiASIKfvlVjT+rxcgS+doJSklCorTqyUAsIV+MjsnCAIgiCIQoOCUgRBEARBEAlwjafvKQWEFVYZeUqlUEo11klm532klCIIgiAIorCgoBRBEARBEEQCmMIp3fS9TJRSo5OpPaUAYIWUvneo34NgiFd9HIIgCIIgiFxhyvUJEARBEARB5CtDkqdUuul7miilUgSlFlTYYbcYMekP4YRrAkurHepPlCCIuIR4AXs63RjweFHtsGF9QwWMBm7GH5sgCCJbUFCKIAiCIAgiAXL1vTSVUiyglJanlKSUKrMnP7bBwGF5rQN7u0bQ3uuhoBRBaERLWy927GpH76hXfq3OacP2LY1obqqbsccmCILIJpS+RxAEQRAEkYCw0XmGnlITAdWfZUbn5SmCUkA4ha+DzM4JQhNa2nqxbWdrVFAIAPpGvdi2sxUtbb0z8tgEQRDZhoJSBEEQBEEQcQjxgpxCl7GnlMr0PZ4XMKIwfQ8gs3OC0JIQL2DHrnYIcd5jr+3Y1Y4QH2+Lwj02QRBELqCgFEEQBEEQRBxGJv0QpHlfhQK1UjzCSik/BEH5JNLjC4LNOZ1FqYNSK+pEpdRBUkoRRMbs6XRPUylFIgDoHfViT6d7Rh2bIAgiF1BQiiAIgiAIIg4sda/MbobJmN6QiSmlgrwAjy+o+HNMJVVkNsJmNqbcfnmtqJTqHfXKnyUIIj0GPImDQulsVyjHJgiCyAUUlCIIgiAIgoiDazyz1D0AKLIYUSQFlUZU+Eoxk/NyBal7AFBqM2NeeREA4GAvpfARRCZUO2yablcoxyYIgsgFFJQiCIIgCIKIg2vCBwCoKrZmtB8W1F
LjK8W8rJwq0gZXSil8HX2UwkcQmbC+oQJ1Thu4BO9zECvhrW+omFHHJgiCyAUUlCIIgiAIgoiDeyJzpRQAlBeLaqfhCeVBKbVKKQBYKaXwka8UQWSG0cBh+5bGuO+xYNH2LY0wGhKFjgrz2ARBELmg4IJSv/rVr7Bo0SLYbDZs2LABe/bsSbjtBx98gBtvvBGLFi0Cx3G47777sneiBEEQBEEUNENS+l5lSYZBKVaBT1VQyh/1WSUws/N3TwzjyX092H3MRRW6CCJNmpvqcP/WdbCaoqdLtU4b7t+6Ds1Ndboe+z8/uW7a69k4dqEQ4gXsPuaivo4gZgCmXJ+AGh555BHceeedeOCBB7Bhwwbcd9992LRpEw4dOoTq6upp209OTmLx4sX46Ec/iq9+9as5OGOCIAiCIAoVt5S+V5mhUkquwKcqfU9USjlVKKUGPeL5Hh+awB1/3gdATPPZvqWRJrEEkQbNTXWoL+vA8aFJAMDiqmK8cOclWVEpnb+kKurvX928Ds1NtaSQAtDS1osdu9qjqhRSX0cQhUtBKaV+/vOf47bbbsOtt96KxsZGPPDAA7Db7fjd734Xd/tzzz0XP/vZz/Dxj38cVmtmfhAEQRAEQcwuXLJSKrMxRDpKqdEpdel7LW29+MFTH0x7vW/Ui207W9HS1qv42ARBiPC8gO6RcOBjdCqQtaDQ4Lgv6u+FlXYKSEHs67btbI0KSAHU1xFEIVMwQSm/34/33nsPV155pfyawWDAlVdeid27d+fwzAiCIAiCmIm4NPKUSk8pJW5bVpT62CFewI5d7YiXvMJe27GrndJbCEIl/R4v/EEenBQLck34MeUPZeXYQzFBqdgg1WyE+jqCmJkUTPre0NAQQqEQampqol6vqalBR0eHZsfx+Xzw+cKd/tiYaBYaCAQQCCgv5ZxvsHMv5O9A5AZqO0QhQe2VSJd4bcclTQLLbMaM2lSpVVwDHPL4FO9nWEoddFgNKT/zdqd7mmogEgFA76gXu48OYANV7NIU6nNmNsf6xXnA/PIiuCb8mPCFcHLIgyVzijPar5J20zcyOe3v2d7OqK/LHdTXEemgtL0UTFAqW/z4xz/Gjh07pr3+/PPPw2635+CMtOWFF17I9SkQBQq1HaKQoPZKpEtk2+kbNgLg0Nb6FtwZrH+dGOIAGHG8ux/PPPOMss+cFo99rH0/nul7P+m270n7T8Xzr70N10FSEOgB9Tkzk7cGxHvLHppAwMBhAhyeeOFVrCzT5j5K1m5e7Y2+r994bz/sKfqCmQ71dbmH+jpCDZOTk6k3QgEFpaqqqmA0GtHf3x/1en9/P2prazU7zl133YU777xT/ntsbAzz58/H1VdfjdLSUs2Ok20CgQBeeOEFXHXVVTCblZumEgS1HaKQoPZKpEts2wmGeHzlrb8BAK5vvgJVGfhKlR934fdH3gNsJdi8+QJFn7mn4zVgYgpXXHQezl5YnnTbyk43/njk3ZT7vPqiDaQe0Bjqc2Y2HS8cAY51Yt3yBTg94kXv4SHUn7EKm8+dl9F+lbSbjheOACc65b+r6huwefOKjI5b6FBflzuoryPSgWWdpaJgglIWiwVnn302XnzxRVx//fUAAJ7n8eKLL+L222/X7DhWqzWuKbrZbJ4RN+BM+R5E9qG2QxQS1F6JdGFtZ8TrgyAAHAfMKbXDZEzfhnNOqai0HpkMKG6XzOi8qtSe8jMbl1ajzmlD36g3rtcKB7GU/Mal1WSUrBPU58xMTkkm54uqSmA0GAEMoc/j0+y3TtZu3JNBAECZ3YyRyQDck8FZ38aor8s91NcRalDaVgrG6BwA7rzzTvzmN7/BH/7wBxw8eBDbtm3DxMQEbr31VgDApz71Kdx1113y9n6/H/v27cO+ffvg9/vR09ODffv24ejRo7n6CgRBEARBFACsUl5ZkTmjgBQQbXTOKzDgDYZ4jHnDE9JUGA0ctm9pBCBOyiJhf2/f0kiTNIJQSZdbTD1ZUFGM+v
IiAEDP8FRWjs2MzVfWipkagx4yOo/s62Khvo4gCpeCCkrddNNNuOeee/D9738fa9aswb59+9DS0iKbn3d1daG3N1wG9PTp01i7di3Wrl2L3t5e3HPPPVi7di0+97nP5eorEASRA0K8gN3HXHhyXw92H3NRVRaCIFLCTM4rM0jbY7DAEi8AHinYlIyxiG3KipStMjY31eH+retQ67RFvV7rtOH+revQ3FSn4owJggDCQamFlXbUl0lBqZHsBKVY9b0VdY6ov2c7rK+zmaOnsdTXEUThUjDpe4zbb789YbreK6+8EvX3okWLIAg0+SSI2UxLWy927GqPqtZS57Rh+5ZGGrgQBJEQl6SUYiqnTLCajCixmjDuC8I96YczhfppeFI8tsNqUqXSam6qw1WNtbj2F6+ho9+D2y9bgq9etZxUAwSRBqNTAYxMimm0CyrsmAqEAGRPKTXkiVZKUVAqTHNTHc5/5xReOjQIANj52fXYuKSK+jqCKFAKSilFEAShhpa2Xmzb2TqtfHDfqBfbdraipa03wScJgpjtMKVUVUnmQSkAKC8WA1EsLTAZbCJcVqzet8No4HBmvROAGAyjSRpBpEeXS1RJVZVYUWw1YZ6klOob8yIY4nU9tiAIGBoX+4qVdWJQangygIDOxy0kPL6worSp3kl9HUEUMBSUIghiRhLiBezY1R7XCJO9tmNXO6XyEQQRF7eGSikAqLBLvlKKglLiNuX29I69sFI0Vj/pVlaKmSCI6Zx0TwAI309VJVZYjAbwghiY0pMxbxB+KQC1tLpEDri4xlP3H7OFyFRoJWnRBEHkLxSUIghiRrKn0z1NIRWJAKB31Is9ne7snRRBEAVDOH0vc08pACiXglvuydSTymFJKeVU6CcVC5tEd1FQiiDS5qSklFpYId5PBgOHuWWiZ5veKXzM1NxhNaHIYkSl1H9QCl+YMalCKQBM+CkoRRCFDAWlCIKYkQx4lK1iKt2OIIjZBVMkaJW+l02l1AJpEs3SjwiCUA+7f+ZL9xMAzM2S2TkLPlU5xKB4lVRwYZCCUjKR6qgJHwWlCKKQoaAUQRAzkmqHLfVGKrYjCGJ2oXX6XplduVJK9pRKYYieiIWVxQDEFCOvZM5MEIQ6IivvMeQKfDorpYZiPO1YcIqZn892QrwQ5Sk17qN+jiAKGQpKEQQxI1nfUIE6pw2JbC85iFX41jdUZPO0CIIoEIYmxMlfpUbpexWSabkipdSUuE1ZmkqpcrsZDqtYYPkUpfARRFrEDUqVZ0kpJQWf5kjBqDmSUmqIPKUAAOMxyqhx8pQiiIKGglIEQcxIjAYO27c0xn2PBaq2b2mkai0EQcSFKaUqNau+JymlJgIptgx7SpWnqZTiOA4LmNk5pfARhGp8wRBOj4qBpwUVxfLr9VlK3xuUlVJS+p5D7D8GSSkFINpPCqD0PYIodCgoRRDEjKW5qQ73b12HIrMx6vVapw33b12H5qa6HJ0ZQRD5TCDEyyl0lVpX31OUvseUUukFpYCwrxRV4CMI9XQPT0EQALvFGOUrJyul9E7f8zBPu1ilFAWlgOnV9mKVUwRBFBYUlCJ0JcQL2H3MhSf39WD3MRdCvJDrUyJUEOIFvN3pxntDHN7udBfk79fcVIf6srBvVFWJBa9/83IKSBEEkRAWOOK49FPoYmFKKWVG58xTKv1jM6UUpe8RhHqYyfmCCjs4Lqyonlcm3lc9I1MQBP3GREOxSikKSkUx5tVeKUVzFoLIHaZcnwAxc2lp68WOXe3oHQ1XN6tz2rB9SyMFBAqA6N/PiD8eebcgf7/RqQCODk7If0/6Q5SyRxBEUljlvQq7RbP+ghmmK1NKSUGpovSVUgullKOTrokUWxIEEQu7byL9pABRac1xgC/IY2jcL3s+aQ0LPsmeUg4KSkWitVKK5iwEkVtIKUXoQktbL7btbI3q3AGgb9SLbTtb0dLWm6MzI5Qwk36//d0jAMIpOJP+EKb8VKWFIIjEaF15Dw
DKJdXTyFQg5Qo8S98rz0ApxSbTlL5HEOrpcjM/qeiglMVkQI1UtVdPXylmaC5X35OUUuQpJRLrKZVJUGomjXkJolChoBShOSFewI5d7Yg35Gav7djVTrLYPGWm/X57u0YAAOcvrYLFKHZ5rgka1BEEkRiXDkEp5g8lCKKCMxH+II8JKXCuhadUt3uqYPprgsgXutyiUmpBZfG09/T2lRIEQQ4+hdP3mNIygECI1+W4hYRHo/S9mTbmJYhChYJShObs6XRPW22IRADQO+rFnk539k6KUMxM+/32nRoBAKydXyZX0XIr8HQhCGL24orxc9ECs9GAUpvompCsDxqZEt8zcECpLf2g1NyyIpgMHPwhHn1jift0giCmw6pWLoxRSgGRFfj0USGOeYPwS4EnlrZXHpFKTGMY8RoBkK9JukqpmTbmJYhChYJShOYMeJQNfpVuR2SXmfT7CYKAvV3DAIC1C8pk1QPziyEIgoiHHul7kftL5ivF/KScRWYYMvCzMho4zJMUHeQrRRDK4XkBXVLaa6ynFKC/Uor5RjmsJtik6sEGAyfbEFAKX1gpVSMF7dINSs2kMS9BFDIUlCI0p9phS72Riu2I7DKTfr+TrkkMTwZgMRrQOLcUlZLqwUWrjARBJIH5uVSWaBuUYhX4kikdWHU+Lar+sdQjqsBHEMoZ8PjgC/IwGjjMlVRRkYSVUjoFpVjqXoyJuuwrRWbnGJsSg1C1TnEsOuFLzyt0Jo15CaKQoaAUoTnrGypQ57Qh0fouB7GixfqGimyeFqEQ9vslopB+P5a6d2Z9Kawmo7zK6KIBHUEQSXBLvnOVWiulpEDTcNL0PanyXgZ+UgyWesRSkQiCSA1TFtaXFcFsnD5VYkqpbp2UUoNy+nB0/8OCVEOklILHJ/aTdVKAMF1PKZqzEER+QEEpQnOMBg7btzTGfY91+tu3NGpWZpvQlpn0+8mpe/PLAYRTZ8iPgSCIZLhkpZS25d6Z+smdNH1PUkoVaRCUogp8BKEadr/EVt5jzMuWUqokVikl9h9DZEEgK6XmSouonjSDUjNpzEsQhQwFpQhdaG6qw/1b18nVzhi1Thvu37oOzU11OTozQgnNTXW46Iyqaa8X2u+3V1JKrVlQBiCcikPpe9GEeAG7j7nw5L4e7D7moiozxKxHP08pMdCUVCkleUqVa5G+J02qu7KglMplP0J9GKElLN11QRw/KSCslPJ4gxjzJq6kmS4s6BQblJoj/T1Eam/ZU6rOmZlSCgjPWcpj1KmFNuYliELGlOsTIGYuzU11aKg6jEP94wCAmlIrXv/m5bTaUCBEqomMnIA/3HouNi6tLpjfzxsIof30GACx8h4ASt+LQ0tbL3bsao+qPlPntGH7lkYaiBGzFjbp0zp9L+wplXgiO8yMzrVI35M8pfQ2Os9lP0J9GKE1ySrvAYDdYkK53YzhyQB6hqdQWpf5vRoJ63/mxHhKsb8pKBWuvsfsJib9IfC8kHZxiOamOni8QXz9f/cDAG69YBG+ey0ppAgiW5BSitAV92R44N0/5oM3kJ4RIZFdJv1BdPR55L9DAoezF5QV1MP5g9OjCPICqkqscgWqymJxQEfpeyItbb3YtrN1WjnkvlEvtu1sRUtbb47OjCByRyDEyxMerdP3ZE+pJOl7o1Pie1oqpca8QTktUGty2Y9QH0bowckklfcYelbgGxpPlL4nGZ2Tp1RYKRVhRD/hT18tBYiBLUZlsaWgxrwEUehQUIrQDZ4X5Mm/xSQ2tchAB5G/HOgeRUgK6DDG06xskiv2do0AANYuKAPHiQOLCvJjkAnxAnbsake8JBf22o5d7ZQGQ8w6mFLJwGnj6xSJsup7LH0v82MXWYyyukIPs/Nc9iPUhxF60SUpCxdUFCfcRs8KfIOeBEbnlL4nwzylqkosMEnBo/EMUviAcKBL/Hdm+yIIQh0UlCJ0Y8wbkAeDZy8QjaY7+sZyeUqEQpgX0zkLy2G3GAFAF98EPWFBqT
VS6h4AVJFSSmZPp3uauiASAUDvqBd7Ot3ZOymCyAOYyXlFsSXtVJBEMI+qZKolpqJyaqCUAsIpSF06mJ3nsh+hPozQgzFvQA5MJ/KUAoD6MvE9PYJSsqdUTPpelYMW1gDRnsEf4gEApUVmFFtFN5pMfKWA6EBUoY15CaLQoaAUoRvsoemwmbB6vhMA0NFLSqlCYF+EyshhEx/2hbZqJFfek0zOgbBSaioQwmSGMu9CZ8CTeDKXznYEMVNglfG0NjkHwil5yQLjo1PaKaWA8MRaj6BULvsR6sMIPWBFAapKLCixJrbe1St9TxAEDDJPqQRG58OTfgSkoMxshAWMOA4osZjk3ylTRX9kBb+xAhvzEkShQ0EpQjfYoLuy2IKVtaUAgIO9pJTKdwRBQKsc0CmHw1p4Qan+MS9Oj3ph4IDV88rk14stRlilVFLXLF9prHbYNN2OIGYKrG9gHnRawgJdY95gwkklU0qVFWmllNLP7DyX/Qj1YYQesODt/AQm5wyWvtetsVLK4wvCHxT7hlij83K76HMkCLNb8c3GoyVWEwwGLhyUynCcGqWUmiKlFEFkEwpKEbrhnpCqF5VYsbJODEp19HkgCOTvkM/0jnox4PHBaOCwqt6JUslTpZCCUix1b1mNI2qlk+M4uZrWbB7QAcD6hgq5ak08OIhVbdY3VGTvpAgiD5CVUiXaK6WcRWZIFncYmYw/6WGvl2mklGJmzXp4SrF+JFGSo579SC6PTcxcUlXeY8zTSSnF/KRKrCbYzMao9wwGTg5sz2azczYeLbWJfWSxVbxO5ClFEIULBaUI3RiK8OVYPKcYFqMB474gunWoVEJoBwvorKh1oMhilJVShZRfv/fU9NQ9BptouiZm74AOAIwGDt//UGPc99gkb/sWKodMzD7czM9Fh/Q9o4GTzdPjVeCb8ofgk1QS5RodX8/0PaOBw/YtuelHcnlsYubS5ZZMzisTm5wDYaXU0Li2laWHEpicM8jsPKxiYvYSWnlKjZOnFEHkDApKEbrBlChVJRaYjQYsrS4BQCl8+c6+mIBOCfOUyvBhn03kynvzy6e9x1JyZnv6HhBODYidstU6bbh/6zo0N9Vl/6SIGUeIF7D7mAtP7uvB7mOuvK+GFvaU0j59D0hegW9kSnzNZOBQbDFOez8dmOKjb8yr6eSZ0dxUh/u3rptWqTAb/Qg7ts0cPZylPoxIF6VKqTK7WS4Ec1rDFD7Z5Lwkfv9TRVWEpymlmCJ+IkOv0Eh1FCmlCCK7JHbwI4gMcUmrOExqvKLOgfbeMRzs9eDqM2tzeWpEEmIDOqUsKDVVGA/oYIjHge5RAPGVUix9zzXL0/cA4LG9PQCAG9bW4+1OF3pGvLjrmhX43EWLSV1AaEJLWy927GqPqpJW57Rh+5bGvA0YyNX3dEjfA4AKuwXHMYHhOH3Q8ARL3bOA47S5ByuKLSi2GDHhD6F7eEpeINKS5qY69I56sWNXOwDg8xcvxjeaV2SlH2luqsNF73XjhYMDAIAvX74UX7lyGfVhRFrIQakklfcA0Q6gvqwIRwbG0TMyhcVztLmvmAIq1k+KwV6fzel7TMVUWiSOT0s08j6NTN8jTymCyC6klCJ0wzURbRbLzM47+kgpla/4gzwO9EQHdBwFppQ61O/BVCAEh9WEJXEGiZUl5CkFAL5gCE/v7wUA3Hj2PJxR4wAgllemyRyhBS1tvdi2szUqIAUAfaNebNvZipa23hydWXLckqeTHul7gBhwEo8TRynFTM418pMCxMkzS0ViqUl6MBoxiasptWW1H/GHwuo7m9lIfRiRFv4gj95RUfW0IEVQCtCnAh8LSiVSSs2h9D05eOSQPaW0Sd+LHOf6gjx8Qe2VpQRBxIeCUoRuyBWMpCAAMzun9L38paNvDL4gD2eRGQ1V4iSGyaMLJb+eKb3Oml8GQ5yJCUvJmc0DOgB4uWMAo1MB1JbacN7iStkfQ2vTVmJ2EuIF7NjVjniJeuy1Hbva8z
KVjwWsK3QKSlUUS55ScdP3xH62XMOgFBBORdLD7JwRadyeqeGwWiInj3pUGSRmBz0jU+AFoMhslIM/yZCfmxqm7w16kgelyFMKGJti6XvRSqlMglI8L0zrtyiFjyCyBwWlCN2IHdivqBOVGCfdkxmvZhD6sO/UCABgzfwyOXVE9pQqkIeznH4YJ3UPIKUU47FWMXXvurVzYTRw4RVfjctbE7OTPZ3uaQqpSASIlT73dLqzd1IKkVW+Cial6RD2lJoe6Gfm584ibQNielbgY4xEKL+yH5Ti5X/r+R2JmQ0LaC6osCtKn9VVKeVIYHTusERtNxtJpJQa96WvbJrwB8GKg1uMBuk4hTHuJYiZAAWlCN1g1c1Y+l5ViRVzHFYIAnC435PLUyMSEC+gU1pgQalYo/ZYZE+pWWwSOjzhx8uHRP+Vj6ydBwCklCI0ZcCTOCCVznbZIsiH+7pKvZRSUvpevOp7TG2ktVJKzwp8jJGp3JVT9wXCQSk9vyMxs2FtR0nqHhB+bnZrqZSSxiaJlFpzSmwAgCHP7B3DjDGjc+YpZWNBqfQV/SyQbjZyspk8+UoRRPagoBShCzwvYFgaXFdGmMWuqBXVUgd7KSiVj+ztYgGdcNU6RwEFpUYnAzg2KK50rolTeQ8IK/dms1Lqrwd6EQgJaKwrxXLpnpxHSilCQ6odNk23yxbj0hzEaODgLNI2MMRgSqn4QSl/1DZasbBCTMfWM7VtOE/S9/SqMkjMfJRW3mPosZgzxNL3EhidM6XUICmlZKVUiVWsgjiRgVKKjXEdNrO830IY9xLETIGCUoQujE4FZK+Qcnt4cN1YR2bn+crwhB8npAHZmnll8uuF5Cm1r3sEgJiqksgPhvkxuCZ8EIT887PJBo+3dgMAPrKuXn6tvixcNj4Y4uN+jiCUsr6hAnVOGxIlwHAQq/Ctb6jI5mmlZFyag5TbLXE96bRAVkrF85SSAjtaB8RY+t6p4SnwOvl4jUam72X5eRGZvicIQPcwqaUI9SitvMdg6XtaPTcFQZCDTYmUUmwMMzzpn7XP6rCnlJS+Z2FKqfSDSCzQVWI1yQqsQhj3EsRMgYJShC6w1L1SmwkWU7iZMV8pMjvPP5if1OI5xXBGpI44NCq1mw32sfTD+WUJt2HBKm+Ax6R/9q2mn3RNoLVrBAYO+PBZc+XXqx1WmI0cQryAvrH8SqkiCg+jgcP2LY1x32Ohnu1bGvOuStp4QDyfqhJ9UveACE+pOEqpYTl9T9vj1zltMBk4+IM8+nVKmcytUkqcnJuN4u9HvlJEOrDqlKxaZSqqHeJ9FeIF9HsyVy55fEH4pbacyOi83G6BgRODr7NV8T0mK6Vi0/cyCUoxpZQpQilFQSmCyBYUlCJ0gfn1xD5UWQW+jl7PrFWp5Cty6l5M2pujKByUyvffbO+p6emHsdgtRtjMYtc3Gwd0j+8VDc4vWFqF6tJw6pTBwKHOSb5ShHY0N9Xh/q3rYLcYo16vddpw/9Z1aG6qy9GZJYal7+lVeS9y38NxjM5Z+l6Zxp5SJqNBVnXoEbAJ8UKUqiD7nlLiAsPiqhIA5CtFqEcQBLndKE3fMxo41JWJz1Etnpssda/EakJRTL8ZeUxWhGG2pvB5ZE8plr6XefW9yKAU81JliiyCIPSHglKELrgSlNReXFUCs5GDxxdEt44T3xAvYPcxF57c14Pdx1x5WXZcazL9znslpVSsQThTSgV5Ad5A/krFBUFIWXkPADiOk833Z1v1GkEQ5KBUZOoeQ4/y1tkkxAt4u9ON94Y4vN3ppvs+D2huqotSLq6qL8Xr37w8LwNSAODJRlBKUkGN+4JRXkhA2Cxc66AUIFYUA4AuHYJSY1MBRK5Z5EopdUaNGJQipRShlkGPD94ADwMXTstTAntuntbguTkkL+gm73/Ygu+gBuqsQiRWKRWuvpd5UKrEaialFEHkAFOuT4CYmYRLakc/WC0mA5bMKUFHnwcdfR7MV7gapYaWtl7s2N
UeVY68zmnD9i2NeTsRypRMvzPPC3L6XmxAx24xwgABPDiMeQMJV+9yTefQBEanArCYDFhRW5p028oSC3pGpmadUqq1awQnXZOwW4zYdGbttPf1KG+dLaLvASP+eORduu/zhK4If58JfyjvUvYiGQ+y9L34qTNa4LCZYJRSfkYmA6gpDfepstG5xul7gOiT89oR4KRbe7PzWNP2bAalBEGQg1LLahwAekkpRajmpNRm5pYVwWxUvmYv+jG6NVnMYQtlqfofFrQamoVVhHlekPuXUtt0pZQgCOA49c8YVrmv1BbpKUVKKYLIFqSUInTBJT1YK4qnP1iZ2bkevlItbb3YtrM1apIGAH2jXmzb2YqWtl7Nj5lrtPjOx4fG4fEGUWQ2YnmNI+o9juMgLUbl9aoRC6qtqndG+ZjFg6kgXLNsQPf4XtHgvPnMWtgt09ckClUpRfd9mHz7zoEQj9Mj4XM8PTKV12nAE1lQShkMHMolJVRkYFwQBNnoXA+lVLgCn/YBG6bwskiT+fEspnv7I8yel8lKKf2qDBIzE7Um5wy2mKOF+p8pn1IFpZgJ+mxTewPAuD8oqzJlTykpKMULwFSalTfjeUqR0TlBZA8KShG6wAbalXEG9szsXOsKfCFewI5d7Yg3DGav7djVnnfpLZmg1XduldLeVs1zwhRnhbBIWsgfzeP8+r0KTM4ZLH3PNYuUUv4gj7/uFwMVN8RJ3QMilFIFFJSi+z6afPvOp0emEOIFWE0GcJxYYCCf7zuWvher8tWasjgV+MZ9QQTjVK3VigXSZFsPFRFTeLE+JMgLURXx9CTyOGdIiyp6VhkkZiZdUiBzQYUyk3PGPA0Xc2SllCNF+p5DCkrNwvS9MRYANxlgM4uDU7vFCCaOSlelKafv2UzhqtN5POYliJkGBaUIXWAKlHgD+0izcy3Z0+mephqIRADQO+rFnk63psfNJVp951ReTEUFoJRSYnLOYO3SPTF7BnQvHxqQUoWsOH9JVdxttBxcZwu676eTT985Un1QLU2k8jk9lFXfi7egoiXMVyqyAh9TSVkjJltaIntK6RCUYqbtc8vCxROylcLnk7wOOU40qGZVBqmKKKEGlr6XrlKqZzjz+4oFpeaU2JJuN2cWG53LJue2sNqb4zgUW1gKX6ZKKbOswMrnMS9BzDQoKEXogmuCpe/FUUpJfj+drglM+rUbtA4oLHOtdLtCQKvvLPtJzY8f0CkyiivO+ZpfP+UP4aAU5Exmcs6Yjel7j7eKBufXralP6OnDBtf5nmIVCd33mW+nJ2yit6CiuCDSQ8dlpZR+nlIAUF4srsRHKqVYUEoPlRQQDkqNTAYwOqXtZIul71UUW1Es+Q6OZ+l5wczirSZDVJVB8pUi1CAH0FV6nUb2a5k+Nwc9ktF5SqUU85SafUEpppRiaiZGsTWzfscTYZ7Oqvrl65iXIGYiFJQidMElVxCZPrCf47CiqsQKQQAO949rdsxqR/KVJbXbFQJafOcJXxCHpFTKRAEdm7Ron6+rRm2nRxHiBVQ7rKhzpr4mTAWRz2lEWjI6GcBLHQMAgBvWxk/dA4A6Z1FBpFhFQvd95tvpSTglxo65ZflvpD8uzUH09JSK3L97ItynMrNwPfykALFCFXsma12Bj6XvlRWZUWLLvBKWGlj6ntUkPqj0rDJIzFxOsQC6SqVUnaQO9Ab4jIunDCo2Omfpe4XxnNaSSO+nSEoyrMAXrr5nIqUUQeQACkoRusAezIkG9islXyktzc7XN1SgzmlDopobHMTKVOsbKjQ7Zq7R4jvv7x4FLwBznTbUlMafxLL0vXzNr9/bxVL3yhRVXWHpe65Zkr739IFe+EM8VtQ65PTZeFhMhoJIsYqE7vvp5NN3jkzfy3fPMl+QhzeUnfQ9poaKrFrH1EZ6BaWAcGqS1hX4wiovszw59GRLKRVgQSlxSKvXdyRmLuO+oLwQs0ClUspqMoafmxn2bUMKjc6rZrHROTMfZ2omRmQFvn
SIrOgX9pSioBRBZAsKShGaE+IFeaCdyCw27CulXVDKaOCwfUtj3PfY5G37lsa8LkeulmTfGRC9ZVJ9ZyVeTEV5rpQKe2Kl9pMCwkbn7lmSvseq7n0kgcF5JIWQYhVJqnsAmLn3fbJEkXz5zl0R6gPmWaZFlSo9YIspJgM3LTVEa9iCTVRQSvq3Xul7QDg1SesKfGGVlwUl0rXLnlJKSt8zS0EpHasMEjMTVq2xotgiV15TQ9hXKv2+TRAEOcjEglyJmCO97570IxjKTkGBfCGRUqqYBaXStAWJSt+LUHsWipUBQRQ6FJQiNGdk0g9W9CbR4HpFLVNKaWt23txUh/u3rpNXTBhVJVbcv3UdmpvqND1ePsC+syVO1TyTgcOqeWVJP78vhck5EKGUyrOgVIgXsPuYC28ecwEAzqp3KvocmxAOTfhn7ICDXZv/eb0T75wQA48fPktBUKpcnLQWilIKCN8Ddst0c+gvX7F0xt73m86smfZ6ud2cN32dIAhyUGphRf4rpVhQqtxuhkHngB57NrrjeErpqZSSK/BpHLAZjVB5OeQ0muw8L6al7+lYZZCYmbD7Qa1KiqHFYs64Lyi35VRKqXK7BQYOEARknDJYaCTylMpUoRlVfU9SYfECMOFPzzidIAh1UFCK0Bz2gHQWmWGOEygBwkqpg31jmgcFmpvqcOXK6qjXvr15RV5M0vRi05m1ssnj/7tqGf502wZsaChHkBfw8+cPJ/ycIAjYK5mcr5lflnA7m2R0nq10DCW0tPXiwrtfwid+85Y8IfrKX/ahpa035WeZgs8f5GfkgCPy2uz4azsAwGI0YJ+kiktGoSmlGM1NdThT6lcuqOFxxQqxwmC3u7C+h1J4XkBbj6g0/ecrluI8KVXvk+ctzJu+bmjcj0l/CAYOmFduR30ZC3jmZ7CAPbv0Tt0D4iulItVGerFQp4DNcITKS/Z2yZrReYL0PVJKEQpJt/IegwXcM1GBDknK7WKLEUVxFlgiMRo4VBTPzgp8Hl9yT6l00/fC+zXDajLAbBQXJiiFjyCyAwWlCM1hD9ZEqXsAsGROCUwGDh5vEKeTlDZPFzbgrpLOoaNfW0VWvjHo8WF4MgADB3zuosXYuKQK394spjQ9trc7oXdXz8gUBj0+mAwcmpKojMKeUvnxcG5p68W2na3ojWk7A2M+bNvZmjIwZbeYUCSVXHfNsAFdomvjD/GKro0Wg+tcIAiCfJ9fUMPj8xctBgC0fNCnaZXPfGHPCTd6RqbgsJrwxUuXYlNTLQDgUF/+9HVdkqdPnbMIFlO4KtqYN5iXqcCpvBC1pJwFpSKMzmWlVJGOSqkKnYJS0vdw2sNG555spe8FwtX3AGC+pPYcnQpgdDL/2hmRf6RbeY8xT4PFnEHmJ5UidY/BxrdDs8SGgJG4+l76QSlfMAS/FNx22EzguHAKdz4txhLETIaCUoTmKFlttpgMWFpdAgA4eFo7XykGG3Bf1ShO1Do0ThPMNw5KE9FFVcXyCttZ88tw7eo6CALwk2c74n6OeTE1zi2FzZx4ZS7sKZX7h3OIF7BjV3tcPx322o5d7QjxyRV4FTOwAl+ya8NIdW20GFzngp6RKXi8QZgMHGqKgDXznVhYacekP4TnP+jP9elpzuOtPQCAzavqYDMbsaJW8unr074/TZeTMSkxJVYTnFLAJR/blyuLQamKuOl7+ntKLZD8lk6PTsleTFrA1Kq5VUqJD6qoKoOUwkcoIFx5rzitz2vhKTWksPIeg/lKMXP02UIqT6lxn/p+LXJsW2wxRe0/32wrCGKmQkEpQnNYRTNmJp0I2exc40nUhC8orxwxzxUtq/zlI+z7xVZW+8am5TAbOfz98CDeODo07XP7pNS9tUlS94D88pTa0+mepgKKRADQO+rFnk530v2wVcaZZHauxbUJD64LazLH/OmWzCmGyQBwHIfr14geWo/t7cnlqWmONxDCMwdExdv1a8XvyCqannJP5Y0KKbLyHkNOD8
1DJZ5bUvtkRyklBuemAiFMSSnEw1nwlKoqscBuMUIQtFND+oO8bGpeVmSWJ3PZMzoXg1IWU3hISxX4CDWwdpJ2+h5LTc4g2M6CUnOUBqVKZmf6XqLqe+F+R/3zjwXQS6wmuUAI23++PE8JYqZDQSlCc1zSJL8iSfoeoJ/ZOVsZLbObce6iCnAcMODxzbg0rUhYFcOV0jVlLKwsxic3LAQA/PjZg+BjFDJ7u1JX3gOAojzylBrwKEv3TLVdWCk1c9qFFteGBQ3yNcUqEeweWBFxD9wgBWxePzKIgTHt04Rzxd8O9sPjC6K+rAgbJC+pMrsFdU4bgPxJ4YusvMfIZ7NzVxY9pUqsJtmzhPkxhc3C9Ts+x3HhFD6NPJfYeXOcOJHLtlLKH+MpBehXZZCYeQRCPE6PiM+HtI3OpX5tdCqQdjB2SE7fU3b/V81SpdSYN+z9FEmxlCUwkYFSKlJ9JSulpnI/7iWI2QAFpQjNYZP8qhQD+0izcy2J9AYotprkwWlHnkzU9IB9t1ilFAB8+fKlKLGa0NYzhl37T8uv+4IhtEmpk8lMzoH88pSqdtg02a5SWmWcSel7WlybYqtJVmrkY+AgEawfWV5bIr+2qKoY6xaUgReAp94/neijBQdL3btuzdyoKnFyoD9P+rpw5b1wSkx+K6Wyl77HcZwcfGLHDZuF66eUAiKNwLVREbG0Q2eRGUYDl31PKSkN0RqRgq5XlUFi5tEzPIUQL8BmNqBaoZ9TLFGpyWn2bYMq0/fCnlKzKyjFFstKE6bvqe932D4jK3eHPaVyP+4liNkABaUIzVE6sF8hpZucGJqQ0xe0gJnrMm8A5rUyU1P4fMEQjg6MAwBWxAlKVZZY8YVLRNPne54/JA/gD/Z64A/yKLebU0rWbdJYf8IfQjDEa3j26lnfUIE6pw2JCrZzAOqcNqyXFCSJYGoI1wxK39Pq2uRz4CARzDduRYxa8IZ18wAAj7XOjBQ+17gPfz88CAD4yLr6qPfkQH+e9HXx0vfmMSP9PAx4hj2l9A0KMZiv1PCkHzwvyIojp+5BKfHZeFIjv6WRqWiD9lxX3wMofY9QDrsPFlTYwXGJnp6pCVeuTe++GvSI/Y/yoJSklJpBYxglMOVSrFKqJJOgVJyKfmFPKVJKEUQ2oKAUoTkuufpe8gdrtcOGqhILeAE4rGF1vNgqKiz4pXWaYL5wbGACQV5Aqc2Euc74Cph/vLAB1Q4rTrmnsPOtLgDAvojUvVQDsaIID/Rs+YQkwmjgsH2LWFkw9qzZ39u3NMq+AIlg1SHdM0gpFXltYlFzbeoLzOx8yh9Cp6T6WFETHZT60Ko6mI0c2nvH8iatLRN2vX8aQV7AqnonllZHf1cWlO7Ig6CU6O0nruAvKBhPqeyl7wFhXyn3hB9j3gAEKbu6rEjf47MUpVMaBaWGpevGlF9Z95QKTA9KaZ2iSMxcuqRnx4KK9EzOGZmancueUgrVWmy7wVmWvicrpYqilVJMoZlO9T1PnJRAppTKBy9VgpgNUFCK0Bw1vhx6qJi6Ila9AO0M1UO8gN3HXHhyXw92H3OlrO6WLdi1W1FXmjC4ZLeYcOdVywAAv3zxMP52sF9O5TtrnjPlMYwGoMgsdhf54CvV3FSH+7eum7aiWOu04f6t69DcVJdyHxXFbJVxZg3o2LWJnKAB6q7N3DwOHMTjUL8HgiCmM8QO6MuLLbhseTUA4LG93bk4PU15XDJtZ35ZkTRKAfiOPs80/7hsw/rhcrs5qnR3vnpKhXhBntydktJ59IapiYcn/LLJebHFGGXYrQcLNPZbGokxaC+xiv9PZ3KYDnL6nikifU8KMPSOedOqMpivz/tItD7HQvjOWhPiBbx13AUAsBi5jL4zC7inqwJVW30vrJQqjDGMFu3LFwzJyshpnlJapO9FKaWkoBR5Ss1a9OgTZ2M/qxRT6k0IQh1y+l4Ko3NArBj1+tEhTf2eYs11V0qBryP94wiGeJiM6gf8LW292L
GrPaqyWZ3Thu1bGhVN8vWEBdsa46TuRfIPZ8/DfX87gr4xLz73h3fl1/+w+ySW1zpSfo9SmxlTAR9GpwKYn/lpZ0xzUx3mOGy48f43UW434z8/eTbWN1SkVAExZqJSirHpzFo4bCb4xv34yhVnYMPiSlXXJp9TrOIRNjmPfw98ZF09nm/vx5N7T+Mbm1Yovg75xrHBcbzfPQqjgcOH18yd9v6iymJYTAZM+kM4NTwpp2nlAhbwiDUOZhO3QY8P3kAItggfoFzR0taLHzz1AbzSZOfr/9eGn//tqO79eznzlJoMyL5MepqcM1hqW5d7EjwvRPmSpcPIFPPCEs+dpdFkz1NKUkqZw892VmVw0h9C9/AUlswpSfTxaeTz856h9TkWwnfWmtjv/ExbH/be/VLa33leBkopQRBUV99jQSn3pD/tsW220Kp9sUVRjgMc1hillDV9pRRLNY70qWJKLPKUmp3o0SfOxn5WDfnbgxEFSYgXZLPWyuLUD1atlVLBEC8PCNjAe155EUqsJvhDPI4PqfeXaGnrxbadrVGdCAD0jXqxbWcrWtp6Mz/xDDiYwEsnlr8d7EdfnApkwxN+Rd9DNq/NA6UUgw0Wap1F2LikUlWwgSn5ZmJQqmdkCkPjfpgMHL5w6RLV1yafU6ziwfqPlXXx74HLVlSj1GZC35hXXhUvRJjB+cVnVMVdTTcZDVhWI06+c+0rFevtx6gotsAmBQ9i+9RcwPr3vrFotUE2+nemlBqZ9Mtqo/Is+FnNLSuC0cDBF+QxoEHqz3CMUkpO38uapxRTSoWHtOlWGcz35z2g/TkWwnfWGj2+cyZp7+O+ILxSGqrS6nsVxRYYOEAQxMBUvqLltWbFdkospmnBdG08pcL9r6yUyqMxL5Ed9OgfZmM/qxYKShGaMjzpl30xlFQQCvs9jUEQMpcwnh7xIsgLsJgMqJEqjBkMHJbXho+jhhAvYMeudsQ7M/bajl3tOZVfMqVUvMp7DPY94qH0e4RNH/Nn1Wg0xmBXDRURRudatL18Ym/XCACgcW5pWkqUfE2xSgSrNpdIKWU1GXHtalFZVKiG5zwvhFP3JPP2eKyUA/259c8KV96LVkpxHJc3Qc9c9+/lEdX3mNpIbz8pADAbDfJvoEUFPjl9ryhaKTUVyE5hjLCnVHRfp7bKYK7bgxK0PsdC+M5ao9d3zsRTipmVF1uMsFuUJbEYDVzYhsCTn0Epra81WxQtjTPmY+l7gZCgOmU3fvU9UkrNRvToH2ZjP5sOFJQiNIWZnJfbzYqkxEurS2AycBjzBjVZNWeVdhZU2KNWUVamaXa+p9Od9LwEiKv9ezrd6k9WAwY8XgyN+2HggGU1iZVSWnyP0jxUSrFVM2caQSmm5POH+Jybt2vNvlMjAIC188vS+nxsilU+IwhChFIqcWCWVapraevVtNpntnjnhBs9I1MosZpwdWNNwu1W5EkFPjl9L05lz/py8bV0q1RpRa77d9lTatKP4YlotZHehKvTZf4bsNRDpvIqjpjYTfj0v9fiVd8D1FcZzHV7UILW51gI31lr9PrOzItxwONTHRSR/aQUmpwzqiQbgsE89ZXS+lqzRdHIKnmMYks4KK223wkbncfzlKKg1GxCj/5hNvaz6UBBKUJTXBPig7FCYfUiq8koez1oMYmKrbzHYAoKtWbnAx5lgTKl22lNhxRkW1RVjCJLYkWMFt/DYc2/BzRboU8nKFVkMcIuXTPXDCupvFeqrLhmQVlan8+3FKtknB71wuMNwmTgsKQ6sYfSOQvLMb+iCBP+EJ5v78viGWoDU0ld01SbVP22MsLsPJckUkoB+ZMemuv+vVxOIQ5gZCq7QSktK/DF9sMWk0E2a/f49H9eyOl75ugh7XyV6Xu5bg9aHjtX2xUCen3nysjn5oi6zw551JmcM1hhj6E8rcCn9bWOFzximIwG+fqr9ZWKW32vKP8WYgn90aN/mI39bDpQUIrQFDa5r1TxYNVyEsUmQvNjJk
Ir01QPVEspgFptpzVy6l6CtCWGFt/DkYcP6NEMJ3LM7Nw1g3ylfMEQ2k6L7WLt/PK09pFPKVapOCh91yVzSqal70TCcRxuWCOqpViAp1DwBkJ4+oDoN3DDuulV9yJhfUGXezJnaQfR3n7TA4X5YqSf6/69wh6uvierjbJgdA5EprZlHpQajnPujgz8XdQSVkrFpO9VqFOD5bo9aHnsXG1XCOj1nTmOC1euVdm3DcqV99Td//legU/ra80WRUtt8cd8rPKn2nFq/PQ95imVPwuxhP7o0T/Mxn42HSgoRWgKM42uVKiUAsLpJu2aKKXE9L2FMSkjzFOqf8ynyth6fUMF6pyJOwkOYuWE9Q0V6k9WA5SanLPvkcjqWsn3YJOMfHpAM3VBPH8BJTA/BleeDujS4WCvB/4gj3K7edp9oIZ8SbFKRdhTLfk9AIS9mF47MoTBPF1ZjsdLHQPweIOoc9pwXkNl0m3Liy2oLRX7rMP9uVFLRXr7VcdJR8mXgKcW/WImsHQ396RfNgtPR/WZDgtUBmySMTI5fXGgJItm52FPqdj0vbAajFfg1ZHr9qAErc+xEL6z1uj5ndPt29JVSrEgVr4GpbS+1smUUgBQYhUD0xN+df0OC56XRqXvif/2Bnj4g/p74xH5gR7zvtnYz6YDBaUITWGKE6Xpe0BYxdShQVCqyx1deY9RYjXJg3A1xzEaOGzf0hj3Pda5bN/SmLMS80q8dIDo7xF7pkq/Rz6aPmaqlKqagRX49kmpe2sXlIPj0m+X+RI4SIUcmE1xDwBAQ1Ux1swvQ4gX8NT7p/U+Nc1g5uzXramfVnEoHivS9NDTikTefox8MdJn/WK8cEU2+nemLPIHeZyWrkW2lFILKkQFW5cWRudT05VSTHHgyYpSanr1PUB9lcFctwclaD0mSbY/Rq6/s9boOa5LVwU6KGUZzFHpKcW2z9dFFq2vNVsUTbQQWZymQjNe+l6kaiqfxr2EvhgNHL7/IW37h0J4tuQDFJQiNIUpTlSl70kqn86hiYxMlQVBkAfYbMAddRw2UVOZJtjcVIc1853TXq912nD/1nVobqpL42wzxx/kcWxwHEB4EpqM5qY63L91HWpjVgCUfg+5+t5UHqXvZagukCvwzaCg1F7J5HxNmibnjHxJsUrFQQXVJyNhhueP7+3W7Zy0xD3hxyuHBgCEzz0V6aYra0Uibz8GC3j2jXpzXm2muakOV8Uxjs9G/263GGXvpeNSX87UU3rDDOiHJwMZqV+9gZBcyj5KKWXNolKKpe/FeK2lU2WwuakOn964cNrruX7eR8Ke5bHzl3TPsbmpDr+6ed2010usprz5zlrT3FSHf7muadrrmf7OaSulxtNVSrH0vfwdw7D2GjtOS+dap1JKFafZ77D9lkTs12Q0yObp+WRbQehPogXdmgz6h+amOnz+4oZpr+fTsyXXKKs7ShAKSSd9b47Digq7Ge7JAH7z2nGcs7AC6xsqVEeMXRN+TPhD4DhgfkXRtPdX1JbiuQ/6VU/UgiEexwbEAe3VjTV4vr0fy2tK8MwdF+c0qn1scByBkACHzSQPhFIhTsBqsafTjQGPF9UOm+JrzVaQsmFcqxRZKZVmGfUK5iml44AuxAtpXe902ds1AgBYm6bJOaMQlFJT/hBODIn35soUKayMD62ei3/Z1Y62njH877unYDYZsvK7qIW1m/9r7UaQF9BY50haYTMSls6bK7NzZp4dr/IeANSU2mAycAjyAgY8XtQ5lfVfesFUStsubsBE71FcfdEGbFxarXt74DgOFXYL+sa8Eel72VFKlVhNqCw2wzURwIO7T2LdgvK07gGWumcycFHKAjZpzK6n1PR11oWVdnS5J3HSPYkNi5OnvjJYWviVK6vxt4NiQHjX7ReqroymJ1c11kKIiOduWV2H+z6+Nu02u1xa2DIbOXz83AV48K2TKLObsOnMWi1ONy9hRtZL5hTjn684Q5PnAEv7eb97GLuPuRTvbzDt9L
389pRiNDfV4f3uEdz/ynEAYv/w+jcvV32tU3lKMZsJNUbnPC/I/VRssKu0yIwJfyivbCsIfQmEePy0pQMAcPtlS3DB0ir84+/fwVSAx3/dcjZWzytLe99Wc3T7un7NXNz7sTV5NfbMJRSUIjQlbHSufGD93Ad9GJfKt977/GEA4oN9+5ZGVZFjtjpfV2qLa3gspwmqrMD3fvcIPL4gyuxmfG3Tcjzf3o8TrknwggBjwgxh/ZFT92pLVaVpGQ0cNi5RNjiPJC+VUlOZKaWqJE8p94Q+A7qWtl7s2NUeVcEunbatFNe4D13uSXAccFaGSql8SbFKxuF+D3hBDIIrTXuoKLagsc6B/T1j+Nr/7pdf1/N3UUu8dnNqeAotbb2Kzi8yJZrnBUUpf1qSSillNHCoddrQPTyFnuGpnAalhsZ9+EAyy//0xgV4+9Uj2JDFAGV5sRiUkv/OUvW9lrZejEmr/z977hCA9O4BZnJeZjdHPYeyq5SKn74HqK/Ax/MCXj8yBAC47aLFOD44geNDEzjQM4rLVlRrdMaZMzzpj0oFGfUGM2qzrJJvY10pvr15JR7f24PuYS/ePTmMcxfNTJ8TtoBz8bI5uG6NMhVqMlraevGjZ8TJ7NGBCXziN28pvqdYUGmOY2YZnUdyYih8D3q8QYR4QX1QKk6aXSTppO+NR/hPxQalHDYTekdJKTWbeOSdUzg+NIHKYgu+cOlSlFhNWD2vDG93unGkfzyjoBSzj2moKkbn0AQ8GfbbMw1K3yM0xSVN7pV6SrW09WLbzlb4Q9Emgn2jXmzb2YqWtl7Fx+6SfExiK+8xWPre4f5xBEPKTQtfkwaoFyytwtI5JSi1meAL8vIgLlcwFYQSg2ctyEdPKeZlko/pe6xtRwYWgPTatlL2Sal7YjvNbHKbTylWiWAB5hV1DsWB2Za2XuzvmR6Y1vN3UUOiduPxBhWf3+KqYliMBkz4Q+jOgdKNmWfHq7zHqE+zSpXWvHFU7N8b60pVpZ1rRUVMul5ZFjylWBsLhKLv63TuARaUiu2DWRpMVjylAvGr7wHqK/Ad7BuDa8KPYosRaxeUY42kON0refXlC7FBiExTdSP9KYssRjQ3iQop5mc3E2Gp7msXpFelNhJ2T8WOJZTcU4IghINSJeqqb7HFGNeEX9W4NhcwuwnGYBqBtLCnVPL0vQmfcisQFnCyGA3T+hC5At9U/ox7Cf2Y8AVx39+OAAD++Yoz5MUVrSwRmN3EDWvrNdnfTIOCUoSmsAeyEglyiBewY1d7XOM39tqOXe2KJ8Ty6nyClJH55XYUW4zwB3l0Dik3d2VBqYuWVsFg4LBGGsDsO5XbQSrrzJQYPGuBrJTKkxWjSC8TZ5rqgkqd0ve0bttKYSu/mfpJAdEpVv1j3tQfyAHMyHtlrbJ7gP0u8dDzd1FKsnbDUHJ+JqMBZ9SUANCmqqkaIr39Ei0QAGElXi6CZpHI/fuyqpwcP9IcnOP0r76ndd/EfP1iDdpZafbsekrFT98DgC6FQSnWHs5bXAmLySAHLFgAI18Y8ojPrHnlReA4Mf0rE7WMHOCXUn8/Ik2ant5/OiOvz3zFGwih/fQoAGBths/LTO+pCX94LFOlUilVUWyBgQMEQazima8EQ7yslLIYxft0II1xRTxD8kjCacPKg0hsoTWeT5VDXozNj3EvoS+/ee04hsZ9WFRpxyfWL5Bf18ISweMN4JRUjOt6SZl5etQrP0MJCkoRGhIM8bK/hBKl1J5O9zQ1QCQCgN5RL/Z0uhUdvyvF6rzBwGG51LEonaiNeQOy+uTCM8RJC5vwswBArpAn5FkLSkmeUt4ABCH3yhm2csVxYR8BtVQWs1VGbaXvWrdtpew9Fa68lyksxQrIvZolEWoDs7n6XZSi5fmlm66cKam8/Rjz8kApJQgCXjsyCAC4aOmcnJxD5LOy1GbWXcqv9T3AvLBiK6CmMzlMl2Tpe2qrDMrtQXres4DFvlMj4PNIMTo4Lv6GCy
rsshosE/V27HjivMWVqHPaMOYN4uWOgQzPNv/44PQYAiEBVSUWuahHumR6T7FFMbvFCLtF3VjGaODkPoQFKvOR7uEp+EM8rCaDrO7vH0tDKSV7SiVQSllYv6M8kDqexDydVfkjT6mZz6DHh/96VfQ8+/qmFXIREiBaKZXu/OeQFNCqKbViQaVd7ncOZnmMls9QUIrQDLZKw3HKyloPeJStkijdjnlGLEiyOh+eqCkbvO0+5kKIF7B4TjHmlYv7ZQbSuVw5ZauiHAcskxQResMe2IGQIK/q5ZJIP6l0PXOYUso94dc00KZ121ZCiBfw/ilp5TdDk3MGS7E6nYdBKUEQIlJOlKWw5uJ3UYOW58dW9rItD0/l7ceQPctyqJQ6MjCO/jEfrCYDzlmUeSA3HSKflbGBHT3Q+h5gKdSxaYclaaTRpIMgCBFG59Pbm5oqg1P+EN45IQb2L1omBimX1zpgMxvg8QZxfGg82cezCgtAVJVYMw5Aj04F5ODwCkl1ajBwss/SY3tnXgofS8dcM79clSdnPDK9pwbTrLzHKARfKZa6t3hOibzYNZjGczasakrkKSX2AWqMzpOpr/ItQ4DQj1+8eBiT/hDOml+GzauiCzwsq3HAwImLbumknQLhyu+sv2Z9bQel8MlQUIrQDFZ5r9xuUbTaW+1QljuvdLuwj0nioNQKlXnB4VX0cGrHGsnkrnNoAsM6eBEpgQ0+GyqLVa+spUuxxSiXoM4HX6lMTc6BsEohEBI0HXRo3baVcGxwHOO+IOwWo+IqbanIlxSrePSOejEmmUQurVYWmM3F76IGLc+vUWUAXitSVd5j1JeJ7+dSKcVStTYsroTNnDiApieRSqls+ElpfQ+MyOl7MZ5S1ux4SgVCglyFLl76XonVhCpp8SGV2fmeE274gzzmOm1YXCUqrMxGA1bXlwEAWnOsjo4kbIxtlSc3B9NUSrEV/PqyoqhU+I+sE4NSrxwayNlYRy/CflJlGe8r03tqaJwFGNO7/wsrKFUsX4cBj7rz5XlB7k8SeUqVpGF0zoLVJXEU9+QpNTs4PjiOP+05BQD49jUrpgWqiyxGLJKeCekqUlnwifXXbDE1V1WS8xEKShGa4ZYerEpNztc3VKDOaUtYv46DWA1ofUPqyi+T/qBcUndhRWJz3UbWCSjsVGS/kTPCqR3lxRZ5wLqve0TRfrSGnf+KLJmcA2L5craSlA9SZjYZKssgKGUzG+WBiFvDQff6hgrUliZe9VTTtpXCVn5Xz3NqlgKUDylWiWCB5SVzipMqciLRss/RA3Z+iVBzfiwAf9I1qWqAninhynuJ+2EgWimVq3TgeIsO2aY8Miils58UoP09MDKZQCnF0vd0flaw1D0gfvoeEPY2O5kiKPXaYZa6NydqUsICF/vyyFcqUl3DJjfpqiLlNOja6PHEshoHGutKEQgJ+Ov+0xmcbf6xTwowZuonBWR+T7kiAozpwD43qDLIk02OD4rps0vmlKBaOl+1XpUT/qAcgE5UyEXud9RU3/MlTt8L21aQUmom87PnDiHEC7hiRTU2LI5fnXxlbWZm57HKfq3M02cSFJQiNGNImtRXKgxKGQ0ctm9pBIBpD3P29/YtjYom2MxPyllkTmp6zRQkfWPelCt/Xa5JnHRNwmTgcN6S6E4q175Scuem0OBZK9jqVD5ImZlSqjTDiRwLoro19JUyGrioQGYkatu2Ulhb1MJPipEPKVaJ6OhT76mWrM9haP27qMFo4PDFS5fGfU9tu6kotqBGCoweyuJK3EmpCmoqpRQLvk0FQrIvUTbxBUN467gLQO5MzgGgIiKYE6s20gMtn7tAEk+pNBQL6cBS94CwgXIs4Qp8yX2lXj8a3/Q+18/7eLAARFWJRe4Djw6MI5BGBTamvI7XlzK11OMzKIVvYMyLnpEpcBywWoOgVKbPlbBSKt30PclTqgCUUkvmFKOmND2l1FhUlbz493q4+p426XvhMW/uF2IJfXjv5DCebe
uDgQO+ec2KhNtlomzieUEeh4XT98T9Her35G2F62xDQSlCM9zSA7FShQS5uakO929dJ+eYM2pKbbh/6zo0N9Up2s9JBX5SgPjQYea7qczlXjsqrpquW1A+Tda7NsdlollucrYq7zEc1vxZNRrRIH0PCAelhjSswDfmDeBvB/sBTD+/Gqe6tq2UvRqu/DLyIcUqEe0xUmilJOpzAOAT6xek/buEeAG7j7nw5L4e2YsuHfacEM1wLTGD7to02o3sWZBFI00l3n6AqFJkK/y5CHq+d2IY3gCPOQ4rlmuU7poOkWkoU4FQVganie6BdNqYrJQqSqSUyk5QymoyJPQGWlDJzM4TK6UGxrzo6POA44ALlkQHpVig/1DfmKrJbiRa9Q8MOZDhsKK+rAglVhP8IV5WpKihPYny+sNnzYWBE1MXT6ioWpzPsNS95TWOuClb6ZDsufKTG1clvacGMw5KsfS9/E2xPBahlJojLZYMqDQ6j6ySl+heL0krKJWs+p45apt8QGlfonWfM5OQr83eHnz7sf0AgI+ePT+p9cWKDJRSp4YnMeEPwWI0yJk2CyuLUWQ2whvgcUJhIY6ZTnbMaIhZgUtWSql7sDY31eGqxlrs6XTh8zvfw9hUEPfdtGaaOikZSn1MAFFddMo9hY5eD85fkniF/LXDLHVv+jZskMoq8qRrtJ0O/iCPowPSILI2u5MpedUoD/LrmVIqU3Pgqgizc6144JVjGJ4MYMmcYjzzzxehtWsY//Tge/B4g/jFTWsSyoPTZdwXxGGpTazRyOQcmJ5ilakhrJZ0qDQ5jyTc57gx4PHivRPD+ONbJ/HCwX58179StU9bS1svduxqj6rAVOe0YfuWRlUT/P3dI9j1/mlwHPC/X9iICV8IAx4vqh1i6odaBdeKOgf+fngwq/JwJd5+jPqyIgx6fOgZmcSqeU69Ty2K15gqZmlVztp1S1svvv/kB/Lfz33Qjwvvfkl1u0kHdg/ceP8b2HdqFLdd1IBvXbNSdRtL5ClVnCVPKV8gceU9BlNKMUV1PFiq/qp6Z1RKJSAG6+qcNvSOerG/exQbVYxNAO36h0hkT6kSKwwGDitqHXj35DA6+sbkKsNKCPECDidRnVaX2nDhGXPw6uFBPL63B1+9alla55tPhFXFZZruN/a58u8vHsGxwYmUi3gsfa8qzfS9fPeUck/45fHV4jnFYN2t2oIiY1PMTyrxmC8dT6mk1fek1/JhIRZQ3pfo0efMFOJdGyD12Hnl3LAi1R/kpy0cJoP5/Z1RUwKTpOg1Gjgsq3Xg/VMj6Oj1YMmc7BStymdIKUVoBgtKKfWUisRo4LBxSRXOXywGgN5X6dUU9jFJPRFSYnYeDPF48xiT8k9Pw4quyJPdCPfxoXEEQgIcVlPGpYzVkk/59aPSCr1WSimXRgO6vlEvfvdGJwDgW9eshNVsxMYlVdgoBaL2d49qcpxI9p8agSCIk3wtTbpznWKVCG8ghE7pvlOTvheJ2OdU4ro19fjuhxqxoMKOQY8P//1ap6r9tLT1YtvO1mkDnL5RL7btbEVLW6+i/QiCgB8/0wEAuGFNPVbPK5PPb+OSyrRSCmWz8wxKxatBqbcfI5dG+rKfVI5S91i7iU1hUdtuMsFo4LCyTgwG2i2mtNoY6xdi0+Yj0/f09AyTlVJJjOpZgDSZp5ScuhdnEQpI31dKq/4hkhAvyJN8pjZkKqd2lQHok64JTAVCsJkNWFQZ/579yFoxhe+JfT0583/Tkn2nWOW9Ms33Hflc+fT5iwCkTn1k1hdz0lRK5bun1HEpdW+u0wa7xSSn77km/AiqSDdNpmhipBOU8iQJSuWTj6rSvkSPPmemkOjaAMC3HzuQ9NrMddpQajMhyAs4OqCuEmvYTyp6vNqYoR/gTIOCUoRmyKs9aVYQASLT4kZUfU7N6nyjgrzg/T2jGPMG4SwyY1X99BV8s9Egv57tFD7ZlLTOkfUV/tI8ekDLSqmi9NsbAFRKA0GXRk
qpf3vhMLwBHucuKseVK6vl19kqzN5T2rcXLSsJRZLrFKtEHO73gBfEgGJ1mqvLkVhMBnxt03IAwK//fkzxinOIF7BjVzviTdPYazt2tSuSzb9yeBC7j7tgMRpw59XaqBHC6Xse8FmQ7p9yi20klbcfI1dG+q5xH9p6xH70ghyYnGvZbjKFBZ7Vmg4DYiB1dCpcdTcSlr4nCMCkPzTts1oRmb6XCKag7h2dgj84fRLM84KslLpwaXwvwLCvlPL+W6/feXjSL3+GLaqsTDMAzcZBy2scCYOSV59ZA7vFiJOuSbTmyLJAK4IhXl4Y0tJ/MR4fWj0XJgOHD06P4XB/4t+Fpd3NcczM6nuyyblUJbfCboHJwEEQ1KUcsnFnIpNzIKzQ9AZ4xQEv5lVVYp2+X2dRfiilUvUlAoDvP/kBTrom8P0nP8iLZ0u+kewaMpJdG47jZFGDWksEtn1sdksuLBbyGQpKEZrhlpVS6U8S5YGfyol7l5SPu0DB6jzrBA71exI+tFjq3gVLEysU2IBmb5Yr8rBBZ7oKkUxwyFLm/AlKZaqUqpSVUpkHpQ73e/Doe2JZ2W9dszK6gtN8KeVTB7NcNlHSY5BdLwcOkleuyiaR1aK0Csx+aFUdVtU7MeEP4d9fPKLoM3s63XFX3BgCgN5RL/Z0upPuJ8QLuPtZUSX16fMXYl556uC6EhbPKYbFaMC4L5iVwM9JqR9WsjgA5M5I/41josH5ilqHpspCpWjVbrSAeeAkO59ETPhDCITEAXxsUKrIbAR7dOppds7S95KlUswpsaLIbAQvAN3D0/uxjj4PhsZ9sFuMWLewLO4+Ip/3StVCev3OLPhQbjfDLKWCpOt3clCBN5/dYkJzUy0A4LHWwjY8P9w/jkl/CA6rCUt1TpepKLbg0uXiwlSi6yYIkQu6aabvOcIWBPkYbAibnIvX22Dg5O+qJhieTNHEKLaGFZMTCoPh4z4lnlL6Kj5TkaovAUTj+Et+9kpSA/lsPlvyDS3645W16SmbWPpeY8y8LVyBL3vFaPIZCkoRmsEm9emk7zFWSeXs+8d86B1VNlEJhng5/UPJZGhBhR12ixH+YGJzudePhktDJ4IZSusRZEhGugbPWsBy+Vlufy4Z0aj6XqWGnlJ3P9sBXgCuaarF2QujA0Sr5zlh4IDTo170pTEBTIQgCHJKidZKKSC3KVaJOKhDYNZg4HDXZrHyysNvd8npgclQ6omRarvH9/ago8+DUpsJX7osfvW9dDAbDVgqrU6rTetJB+bZk8rknFGfI6XUa4fF/v3iOKnZ2UCrdqMFTCmVTp/ETM4tJgNs5ujhJMdxciqNniqDsFIqcfoex3FymzwZx1eKPe/PW1yZcD9Nc50wGTgMenw4rfBa6fU7D3miU/eA8Ar8gMenKhX9YBKT80g+snYeAOCv+3vhC+qnfNMbtuC5er4zK16grHrhk/t64qpVfTwwFRDbcLpBqQq7BRwH8IK23phaEVl5j8Eqw6qpwMf6kWRKKavJKFfhVBoMTxbsYscK8YKuis9UKO0jlLbobDxb8g0t+mNZkaqiAt+4LyiPjWL9/tjfPSNT8kL7bIaCUoRmsPSnTNL37BaTPLhSmsLXO+pFkBdgMRrkXPVkGAyc3BG0x4lOe7wBtErHvjBJagdbOe3oG8OkP3tBmg7ZlDT7FaNK81AplanROVP2ZSp9f+u4Cy92DMBo4PB1KRUskmKrCculQOI+DVP4uoenMDTuh9nITVuF0YJcpVglI5EUOlPOX1KFS5fPQZAXcM9zh1Jur1Rlk2w7byCEe58Xj/Wly5aizJ5ZOmos6ab1pIPSKqgMWSmVxbYlCOFUrUT+QXqjRbvRijpZKaX+N4g0OY+nWGQqAz2VUn4F6XtAOIUvXgW+cOpe4vZQZDHK95LSFD69fuehOMqaYqtJXpRTM2FK5HUSy8YllagptWJ0KoCXOwZVnW8+sU+uUqtv6h7j8hXVcNhM6B314q3jrmnvS/FF2C
1GOfVMLSajQVZ856OvVGTlPcYcqc2rCY6wAjvJlFJAWC2ltAJfsqCUzWyASQpe5tK2Qqnf2HevXalou1wohHONFv2xEk/iWA5J49Vqh1W2C2E4i8zy4lwH+UpRUIrQhkCIl4MEmSilgEhfKWUDPzYRmldRpNioNTxRm94JsNKpDVXFmJ9kclXrtKG21AZe0Me8Oh5D4z4MenzguOkR92wQ9pTKvVJqdFLb9L1MVhgFQcCPpfSrT6yfj8UJ0gLC6akjaR8rFubx0TjXCVsSs990yVWKVSIEQdBFKcX4ZvMKcBzw9IHepH2QIAiK/FUqii1Y31CR8P3fv3kCvaNezHXaZGNcLVmZRSNNNd5+QFgpNTIZUFXCOxOODY6jb8wLi8mAcxcl/l30ZH1DBeqctoSr2hzEYFGydqMVtU7xNxjzBlX/BsOSUiqRr59sOpwVpVTy4WyiCnzeQAhvS+kaF6cwvQ/7So0oOje9fud4QSlArCwMKL/Xx7wBOSC8MoXy2mjgcN0aUfXz+N5uVeebT+jlv5gIm9mID60Wq509Fsfw3CPFOdJVSTHy1VfKH+Tley5yXFRdytL3lJ8vG3emUscXqzQ7Z9s54iiwOI6Tj5crX6lAiMdf3j2VdBvWl9yycVHSPgcQx7zZeLbkG30pFl6U9MfLaxzgONELTWkAONV4daUCn+PZAgWlCE0Ylib0Bg4Zr/SvkVawlA782ANPSeU9RrK84FRVeCJJ15g9XZjaYVFlseqy9VqQL55SosGuNkqpyPS9dD0DnjnQh/dPjcBuMeKOKxKbVOvRXuTy1jpUEgJyl2KViL4xL0anAjAaODk1TUtW1pXixnViqsqPn+2I2yZ4XsCPnjmIn0WoqRINAsemAnjug7647w1P+PGrl48CAP7f1ct1CSquTNOYMx3UePsB4iSAqS+z1b5elfwCNzRU6HK9lWA0cNi+pRHA9HbD/t6+pTGtanhqKbGa5Ep5fSrNzplSKlEfzMzOmWeLHrBUsmTV94DEFfj2dLrhD/Koc9pSluRWu2DGfud4T5VMfmc2GYoNZKxQObk5JG0312lTVJjgBqkK30sdA3LqZiExOhWQq2bpUXkvETdIqY/PHujFVEwKmCcg/vaZZBiIn8/PoFSXewIhXkCxxSin7AFAjaRGGVSjlFJQfQ9QFwwXBCFlVT/2+lgO0qum/CF8/sH38MS+07JHX7JnhsVkSPhsYYxOBfDiwX49Tjdv+cObJ3Dno+/Lf6f73C2yGNEgVSlVOqaSlf0JsluyOUbLdygoRciEeAG7j7nw5L4eWS2kFJa6V263ZDyQZgO/Az2jCCionnHSzcx1lU2EAERUUJg+eFMi5WeEy0SrT8dK53rrlbaklHzxlJr0hxCUrlemSimm7AvygqrvxX6/x97rxr/89QMAwD9dvDjK5yOWdVJ72d89oqoUcjL09JMCcpNilQwWSF5cVaxbUOHOq5bBajJgT6cbf2vvj7pPfYEQvv6/+/Gb1zoBAN/ZvBIPbF0nG0Yzap02rJ1fhiAv4EsPt+Kht08CiL7vv/P4AXi8QayodeB6acKnNayvOOme1FWNFOIFVd5+jHrJ1D1bSrzXjjC/wNyk7jGam+pwf4J2c//WdWhuqsvaudSk6SvFAhOxJueM7HpKpUrfE8cHXe5orzi2CHXh0qqURRNYyn7b6bG4Vfzi0dxUh8uWT/cuy+R3HmRKqZhqbWrNzsOVfJUpTlfWlWJFrQOBkIB/f/FIWmPFXPK+9KxcUGGflkajJ+csLMe88iJM+EN4ISYYMKaZUkpsC/kWlDo6EK68F3l/MaXUgAqllBJPKSDc7yh53vmCvFysoSRB+mSpLTtKqdg5gXvcj62/fRsvdQzAZjbgvz99TsKxRmRfkuzZsrreiSAv4As735PVV5nM/fKN2O8SDPH4+QuHsf2pDyAIwKc3LsR/3pzZc3eFSvV5IpNzeX9Svx3PTma2kX2pRYb86le/ws9+9jP09fXhrLPOwi
9/+UusX78+4faPPvoovve97+HEiRM444wzcPfdd2Pz5s1ZPOPCoKWtFzt2tUdVJqhz2rB9S6Oim5SZnFdmuNoDAA2VxXAWmTE6FUBHrwer5jmTbt+l0scECKe+9Y56MTLpl9Vdp9yT6ByagNHAYeOSypT7YYPU1i6xIo/SSmDpXu92hf4PepEvSilmcm4xGlCUYWDCajLCYTXB4wvCNeFTtGIc7/czcKKCLRmLq0rgsJng8QbR0edBU33ytp0KXzCE9tNim9DLIyM2xSpd3wut0DN1jzG3rAifuWARfv3349j2UKscAAXEya8vyMNo4HD3javxD2eLq+BXNdZiT6cbAx4vqh1hCfh3n2jDn/Z04TuPt+Ht4y7sOTE8bfJ/5cpq3VQxlSVWzHFYMejx4VC/B+t0KoN+emRK9PYzGVCrwNuPUV9WhIO9Y+jOQtDTFwzhreNiqtaFS3Njch5Jc1Nd3HaTDYVUJHVOG44OjKuuwKdcKaV/9T016XuRz+pXJdP7ixSY3i+qtKPMbsbIZAAHe8dwlgK1TSDEy+n9W86ai13vn0ZdqQ2vf/PytH/nIWm8FeszwyY9R/rHEQzxMBmTX5NwX6p8katxbik6+jz43Rsn5NfUjBVzid4LOIkwGDjcsLYev3zpKB5v7caHz5orvycrpZIsZimBBbXyzVMqtvIeo1r6vv26eEop73dYoInjgOIE2QeyUkrHcW+8MaXJwCHICyi1mfC7z5yLc6R0cyXPjETPFkEQcNdjB/Doe934xv/ux1vHXNh93JX23C+fiHcN7RajbFD/1SuX4Z+vWAqO47CpKf3n7sraUjxzoE+RTyfPC7IiNVFxKhbkOtznQYgXsv78zycKSin1yCOP4M4778T27dvR2tqKs846C5s2bcLAwEDc7d9880184hOfwGc/+1ns3bsX119/Pa6//nq0tbVl+czzm5a2Xmzb2TptQNo36sW2na1oaetNuQ/XhPggzNRPChAf4GHvndQKJCbHV7M6X2ozY56kAIksxclWTdctKIubXx5L01yxWqCaijyZXG/WCeZMKZUnnlLMT6q0KL7BrloqpGCqS4GvVKLfjxeArz6yL+nvF9m292ngK/XB6TH4Qzwqiy2YX1GU8f7ikYsUq2SEV/f1vQeW1Yj7D8asGjJlxj9dtFgOSAGQA9nXranHxiWVMBo4GA0cfnRDE26XKuo99X5vXDXKr14+pqifTZdsmJ2zNOr55UWqqlrNy6JnWevJEUwFQqgqseasD40lXrvJNiyIqKY8OwAMy0Gp+M99hwrFQrooqb4HiIFmAwd4A7xc8WvA40VHnwccp0wZzXERYxOFKXyvHh6Ea8KPqhIr7rjiDADiokomP/MQS9+LCWTMKy9CscUIf4jHcQXVQw+qrOTb0taLx1qn+yKpGSvmEvab6ZXqngyW+vjqkaGowBHzlFJqZJ0IptBmAct8IV7lPQByUSJ1SillFZfVKKXYPkuspoTPLb3HvYnGlGzs8c9XniEHpADlz4x425mMBvz0H1bj8xcvBiD6nGUy98sXEl1DFpD6+Pr5uOPKM+T5QibPXaYsVVLRuHt4CuO+ICxGAxbPib9ovaiyGDazAVOBEE4mqAg/WyiooNTPf/5z3Hbbbbj11lvR2NiIBx54AHa7Hb/73e/ibv+LX/wCzc3N+PrXv46VK1fihz/8IdatW4f/+I//yPKZ5y8hXsCOXe1xPQ/Yazt2taeUc4aVUtpIopUaigqCEPaUUhGUAsITtUgJJkvtULqKLlbkYdUCUw9SM7negRAv+yHkWik17gvmVOI7MiW2N2eRNqodZnbuSjGgS/b7MVLdL2tVmuUmQ/aTWlCmSXAuEdlOsUpGuPqkfvdASEH1vSf29Si6BziOw1evWpZydVdJP5suyTz0tCK8OKA8jRrIrmdZZOpeNsrBFwrpVuBj/XBCpRRL38tGUMqcfDhrMRkwV2prrK2+IS1CnTm3VPGCGlOkKi1WwcytP3zWXD
kAOxUIZZQCz9L3YgMZBgOnuDpU5Aq+kr6UPfvioWasmCsEQYgwOc9O5b1IFs8pwVnzyxDiBex6/7T8Oqu+p5VSKt/S9+JV3gPCSqmhcZ/iNjOWpEpeJCVpKKUcSRTgenpKKRlT/va1Tk3vK47j8I3mFQmvYyHcz5EouYZ/PzSo2Xdhc75jg+Mp07gPSpYrS6tLYE6gXDUaOCyvIbNzoIDS9/x+P9577z3cdddd8msGgwFXXnkldu/eHfczu3fvxp133hn12qZNm/DEE08kPI7P54PPF+7Ux8bEBhUIBBAI5DZlKRPYucd+h7c73Ukl+wLEFLfdRwewIUlFgkGPOJgtLzJpcp1W14cDPcn255rwyw+emhKzqmMvqy7GC+1A++lRBAIBhHhBHqRuXFymeF9n1TvR1jOG1hNubFqZPJiVyfU+3O+BP8Sj2GpETYk211kJkW3HZgyvRg+PT2Xs55Qubkny7SxS95snolyaVA2MTSbdnxb3yyq5bbszPvfWk2Iq0ur6Ul3bw1ynFQd7gZOucQQC2R/QM7yBEI5LK69Lq4rifudEfZ0aUv3OgLJ+MXJ/yfwolPaz6XJGtRgoOtg7qls76RwUB1P1ZTZVx6hxiPdet3tC9z6NBaU2NpTr1nYKkTmSUvT08JSq7+6WJsClVkPcz9mlQNHYpF+3azolmaibDal/twXlRegensLxgTGsnefA3ztElf0FiysVn9+qenFyHTk2SdRuPN4AXmgXPYQ+vLoGRvAot5sxPBnAKZcHdrN6tR7PC3KlWKdt+nVfVl2M904O44OeEWw+szrhfk64JjAVCMFqMqC+NPVzVKuxYq444ZrAyGQAFpMh4bNDb65bXYv3T43gsdZu3LJhHgKBgJy+V24zZnRO5UXi2GxwzJs3/ZcgCPLzekG5Neq8Sq0GGDhRYd43MiEHqZLBVE12E5f0OxaZxWs6NpW63xmZENt0iTXxmLrEKl7b0Umf5tdW67GGmuOmMybJx2dktq9hdbFJtuE41DuSVHX9QfcIAGB5bUnSa7a8pgTvd4+irXsEV63Ird+lHihtLwUTlBoaGkIoFEJNTU3U6zU1Nejo6Ij7mb6+vrjb9/XFr4QEAD/+8Y+xY8eOaa8///zzsNvVKXHykRdeeCHq7/eGOACpPXmef+1tuA4mjjLvO2YAYMBQzwk880xnhmcJTAQAwIQTrkk8+uQzKE4Q+zjhEbdzWgS89MJzqo4x6RK/+9uHuvHMMydx0gOMTplQZBTQvf9N9B5Qth/OLe7n5QMncJZwLOm2mVzvdwfFz1Zbgnj22WeVnZyGsLZjNhgR4Dk89ewLqFRuHaMpu/vFa+Edc+OZZ57JeH+TbrH9vrW3Dc7BxD+8FvcLa9vHhybxv089A3sGvfDuw0YAHLynD+GZZ+L3g1oQGBGvz2vvfYDyIYU3hg6cGgd4wYRik4D3XnsJycRhsX2dGrTqF/Xan1rExWoT2rqH8fTTzyS9bumy55DYRsb7OvHMM8cVf+7kuHhux/tGNLmXEzEeANp6pPvl5D4807sv4baZtJ1C5NSw2D4Pdw+o+g1OnBav59H2A3imf/+097tOS/vt7MIzz5zQ6nSjOHhCbHfdJ0+kbnfj4rYv7zkAW+/7eKldPH+z6yieeeaIouNNBgHAhC73FP7y5DMoiRibxLab3f0c/EEjaosEnNj7Ok7uA+wwYhgc/vri6zhWrv5eHw8AIV58aLzz6kuIXXwPSn3NaweO48zg0YT72SeNf6qtITz/XEvK4+a6D8uUd6Tx01xbCH97PvX31QNrADBwRrSdHsPv/vcZ1NoBT0C8pkcOvIfQyfT33S318T1uj679qBrG/IDHawIHAQffeQ1HY9pqicmIsQCHx599EfNTFNIN8oA3ILb7t157Oem4qbdLvM/bDx/HM0nuAQB4X7oPAlOJr1vvKXGbNgX7U0uu7qtMj5tPz8hcXMNqsxEeL4dHnnsd585JvM9XpHGR4D6FZ57pSrhdSG
qHr+4/iuX+w5qcYz4xOTmZeiOkEZT64x//iJtuuglWa3RU2+/3489//jM+9alPqd1lXnHXXXdFqavGxsYwf/58XH311SgtzU3KlBYEAgG88MILuOqqq2A2h0dRlZ1u/PHIuyk/f/VFG5JGmP/68D5gYADnrW3C5vXztThl/PeJ13F8aBJzVp6LSxOYkD71fi/QdgDL5lZg8+ZzVe2/0TWB/zn8BgZ8Rly96Sr8+rUTQNtRXLS8BluuXaN4PyuHJrDzF2+gZ8qIK6++CpYkhquZXO+25w4DR0/g/JULsHnzSsXnlymxbedfD7yCwXE/zt54YcJqEnrT83oncPwIli2sx+bNqzLe38EXjmD3QCeq6huwefOKhNtpdb/8V+frOOGaRM3K9WlXARv0+ODe/XdwHPC5G65KKWnPhN43TuDVlsOwVc7F5s2rdTtOKh59rwc48AFWza/AtdfGv98T9XVq0Op31mt/agmEePz8gxfhDQFnnX+ZnEakJQ907gbgQfOF5+DyONXGEjE07sPPD/wdY0EOV17dnLT/zISnD/RBeHc/lteU4BPXnx93Gy3aTiGyuM+D/+rYjUlYsHnzZYo/d9/h14HxSVxx0QasXzS93U68140nTrajtLIamzev0/KUZfbsOgj0nkLj8qXYfPnSpNt2v9aJN58/AkvlXCw9uwFjb+1GkdmAL3z0ypRG6ZH8pvMNHB+aQNWKc3H58jkJ281Dv30HwDC2XrgM117cAAB43NWKnsNDmL98FTafMy/BERJzuN8DvLsb5XYztnzo6mnv13aN4NHf7IE7VITNmy9JvJ8XjwKHj2PD8nnYvPnMlMfNdR+WKe/89SBw9BQuW70Im69ZnrPz+JunFS8fGsKw8wzccukifP3tlwAAW66+VDbjT4dBjw8/2/93TAQ5bGq+Ji/Mkt/udAPvvYv5FXZc96GLpr3/Xyd344PTHiw769y4FSojcU34gbdfAQDc8KHk36/n9U4813MEVbWpx4eTrT3A4Q+woHZOwj5qYPdJtHQfQnm19uOfXN1X6R43H5+RubiG7/AHceztU7DWLMbm5sT9yb2HXgMwhRsuW4/zkxTPqjrhxv/99l0MC3Zs3nyxJueYT7Css1SonsXceuutaG5uRnV1tCzY4/Hg1ltv1S0oVVVVBaPRiP7+6HKq/f39qK2tjfuZ2tpaVdsDgNVqnRZwAwCz2Zw3N2AmxH6PjUurUee0oW/UGzcfl4NYKnPj0uTVodyS4Wl1aZFm12nNgnIcH5rEgR4PrjpzbtxtekbF9IFFlcWqj7uk2okisxFTgRB6xgJ485iYCnXJ8mpV+zqj1ilXCzw6NJW0Ig+73smkpjWl1rjX+5BUWrex3pmTtsjaTmmRGYPjfkwFkbN7Ytwn5nGXFVs1OYcqhyj5Gp4KJt2fVvfL2gXlOOGaxP7THlzemF6Fk7ZeFwBgWbUDFQ59TM4ZCyrFZczeUW9O+8Ejkj9F49yylOeRSZ+t1e+s1/7UYjYDS6sdONg7hiODk2io1jaYLAgCuiW/sSXVDlXXvbbMJFc0dE2GsKBSn1Ltu4+Lnn8XnTFH17ZTiMyX7m/3RAAhGGBTWNGUVUGdU2qPe72cdrFfnfDzul3PgOQTUmRN/ZstniOmWZwa9mJ35wgAYMPiSpQUqWtz6xaW4/jQBA70eLCpKTw2iWw33cOT2HNiGBwHfOTs+fLrdWVi4GFgPJDWNRnxis++qpL4z74z54np1f0eHzx+IaFX1mFpPHGmwvFErvuwTHm/W5wUrVtYkdN7+8az5+PlQ0PYtb8P/3RRA/y8eK3qyophNqe/sFTtNIKT0uE8fgFzHJkXHcqUE25xnLs0wTOhprQIH5z2wDWZfNwFAFNBMWW1xGqCzZr8uznt4v08EUjd70wFxNZcarck3LZM2t+4L6R528nVfZXquIDo+5XouPn0jEw1r9LjGp5ZXwbgFA4PTCS8DhO+ILrc4rioaV550uvVVC8Gy3pGvJgKhc
31ZwpK24rqJclEZe+7u7vhdGZW3jwZFosFZ599Nl588UX5NZ7n8eKLL2Ljxo1xP7Nx48ao7QFRcpho+9mI0cBh+5ZGAOKNG4/tWxpT3sjM46BSg+p7DGZGmcxQNJ3KewyDgcNyKRf43RNutEpG5Refoa5UOMdxconhVBXVjAZOrsCTcH/g4lZB6pCMS3Nlcs5gVQn1MH1UCpsMaeVpxUxC3RPJTULZ/ZJo8AAou19Ye8nE7Dyb5a2zaUadjGxV3kvWL6r5nfXaXzows3M9jDSHJwPw+ILgOGBeubq+mOM4uX11jyiTeKtFEISwyXkC1e1sxllklpVCSqth8byAUakfLkvQD5ewwhg6VmtVWn0PAOZLSpQu9yRelU3v1bcHJc/7J/eJZtbnNVTKButA2FQ+XhVOJbDKbVUJisqUWE1YIH3PjiRm52or72k1VswF3kBI/r7ZeF4m48qVNXBYTegZmULLB6KVSJHZgOIkRttKMBkNqJCqYOaL2XmiynuMmlKxDSvpc+TKewoU4eqq76U2T2fV/tg5aEnkfRWLnmMDJfezNxDCkYH8N942Gjh86bL4Klm9ruEKuXhM4uvDxlpzHNaURcCcdrM8DtKzSnK+ozgotXbtWqxbtw4cx+GKK67AunXr5P/OOussXHTRRbjyyiv1PFfceeed+M1vfoM//OEPOHjwILZt24aJiQnceuutAIBPfepTUUbod9xxB1paWnDvvfeio6MDP/jBD/Duu+/i9ttv1/U8C43mpjrcv3Udap3RBkHOIjPu37oOzU2plRwu6SFYWaJhUEpSHO07NQI+QdWELre42rdAZcUnBqui8Ps3TyDIC1hYaZcHrurOVQqgKajAxwayZmN0BzmnxIqyIjP6xrz4h/vflCvtAeL1HfD4wHGQqzTkivADWr+JRirkyVCCqk9qqVBYfQ8Q75fmppppr9c6bYrvF9Ze9p0agSCkl+POAlprslDeul5K9xrw+FJWG9ELQRDClfcUTqQyIVG/qOZ31nN/aolXbVQrWBnj2lKbYpVNJKx96VXd8djgBE6PemExGeKmmc12OI5TXYHP4w2CPZadCavviW1BSRWsdPEFWFAq9XCWLV65J/x4+7iojE4nfTqy/45X0UkQBDzW2g0AuGFdfdR77P7vjbPwpAQWcJiTxBhanjAlCECPeQOysnGligB/oj7MZjZkpQ9Ll7aeUQR5AXMcVnnilytsZiM2rxKv02/fEE2ktKpanW8V+I5LyubFc+IbRs2RFOoDntT3AqtW6VCgICm2qAlKiWNJRdX3dBrzNjfV4f+7oWna63qPDRLdzzWlVsx12jDmDeJjD+zGuyfcuhxfS96RztESY7Kn1zVcXusAx4n3GlsoiKWjT52QYIW8cKhfleR8R3Fo/vrrrwcA7Nu3D5s2bUJJSbiTsVgsWLRoEW688UbNTzCSm266CYODg/j+97+Pvr4+rFmzBi0tLbKZeVdXFwyGcIM8//zz8fDDD+O73/0uvv3tb+OMM87AE088gaam6Tf/bKe5qQ5XNdZi+XefRVAaZF2zqlbRjewP8nJnXVmsXerFiloHbGYDPN4gjg+NY2n19METU0otSDMXf3mMeuCCpYlzfpOxhilfUiiljvR78Jd3TwEAHvrcBoR48YFc7bBhfUMFeken8Knf7sHxoQl87Ne78ftbz8WZc514rFUsK13tsKY16dOS8AM6d0qp0UltlVIsmOqaSB2UAoDeEXEQte2SJVhR55B/P6UrMSvqHLCaDBidCqBzaCLhoC0eIV7AW8ddeI9V3ptXpviz6VJZbIHNbIA3wKN3dAoL0wwCp0uIF9DS1oeRyQA4AIsTrLxqDesX93S6o+7TdFfctN6fGpi6bG/XCJ7c16PpsbvcYj+cTkAf0FeJF+IFPLj7BABgWXWJbp5VhU6t04YTrkn0KQyWjEyJfaXdYkyoUiqxiv2zrkGpYAiAsqCUw2aWq9/5Q2IlvMVV6vuSZTUlKDIbMe4L4t
jgOBoqoid1B3pGcWxwAlaTAdc0RdtFhJVS6bX1wfHkSilAnAQ9396fUCl1WBrv1DltKLOrW0iM7MNau4bxs+cOIRQSsHFxdipGhXhBdf/JFnDWzi+Lm+mRbW5YV49H3j2FY1LQxmoyIMQLGffFlSVmoB947oM+mAyGrD1bEhFWSsUf37CKe/1qlFJFqaetTHXmUdDvsL4pqVLKpp9SitFQJV6jmlIrvr15ZdbGBonGJOPeIP7xD+/gvZPD2Prbt/Gfn1yHS5ZV4+1ON94b4lDZ6dYkHS6d+zmWA92jsjL10S9sxKQ/pPv4ym4xYVFlMTqHJnCozxN3kYApnlYmqc4Xycq6UrzYMZBUfTXTURyU2r59OwBg0aJFuOmmm2Cz5abs1u23355Q6fTKK69Me+2jH/0oPvrRj+p8VjODQIiXA1IA8P6pUUWfG54UB6dGA6dZkAAQ5cir55VJg5+RaUGpKX8IA1KEOh2DyJa2XvzyxehKGs8e6MPFZ8xRHVVfIwUGTrom4Rr3JVz5urvlEHgB2HRmDdY3TA+AzSu349EvbMStv38H+7tH8dEHdqPYapLTI/vHfLjw7pewfUtjzlYlww/omaOUYsFU94QfPC/AkOQhNjzhx/4e8d74zAWLUFOqvi80Gw1YVe/EuyeHsbdrRHFQqqWtFzt2tUflzv/j79/BDz6sb3vgOA5zy4pwfHACPcPZDUrFfmcBwJU//3vW7gGjgcPGJAaVud6fUtj16xvz4o4/7wMgTkq1uI5yGnWmQSmNlVKxbaft9FjO+898pc4p/gZK08qGpYWB8iRBjaym7ylYrGlp640KkA1PBnDRT19W3R7EsYkTb3e6sbdrGA0V0Z9li0hXn1k7TdkRVqSlqZTyiGOBqiSeQUz9dDDBins4dS891TXrw85bXIGn9p3GoX4Pnj7Qi5s3LEhrf0qJ9/xT0oftPSUq2NfkOHWP4R73wyD5PwGikjPTfqmlrRetUvBt51td2PlWl2b9ezpM+UPyIkPi9D3xXhhUopRiiiYFSikWYFKXvpd4v6WyZYV+/RjL+lhW48B1a+pTbK0t8cYkTrsZOz+7AV986D28fGgQn/vDu3DYzNLY24g/Hnk34/aV7v0ciSAI+EnLQQDA9WvmJvX01ZqVdQ50Dk3gYO8YLoyjuD2o0nKFLRzqoWYvFFQvGX7605+GzWaD3+9Hd3c3urq6ov4jCpdY5cuhvjFFnTqTCpfbzUkn8+nAUvjiee+cGhYnQg6bSXVwoqWtF9t2tk5TxoxMBrBtZyta2npV7c9pN8sP3ve7p58rAOzpdONvB/thNHD4RnOSCm8lVjx823lYXlMCX5CXA1KMvlFvWueoFSynP5eeUqMae0qx9L0QL6RUgL1xbAiCIKZRphOQYsi+UqdSp3wC4TYbO5npH8tOewj7/mTPVyrRd871PVBotLT14pv/u3/a61pdx0y8/YCI9D0N2xa1HXXUqgyWsMWoZH0w83bxh3hZ0aQ1YU+p5MNZ1h4Coeh0u3TbA/O8jPWVCoR47HpfXLX/yNrpk8taKfjn8QYVja9iUaqUAoDD/eMIhqanW7O0vkz9KTmOk9MTH9/bndG+UpHJ/RxWSpXreYqKaGnrxZcebkVs1mcm/RK7Nt5A9G+dy76uc2gCgiAuHCYy22dKqYEE6U+RKPF+YhTLnlKp+xw23itRkL43FQghEOd+0oJMn6F6UGQx4r8+dQ7WN1SAF8LjboYWbTbT5/OrR4bwxlEXLEYD/t/V2a2qyfz44gX/I+0mlHqgsv74UJ8noWXNTEd1UOrIkSO46KKLUFRUhIULF6KhoQENDQ1YtGgRGhoa9DhHIktEdvpznTbwArC/O7VaKmxyrn3VpGSGopGduBpJdogXsGNXe1yzavbajl3tcb0ikp8r85Wafq6CIODHz4rR/JvOnZ9QzswoMhunPQC0OEctyAdPqRF5QqSNh5nFZJAHHkMpfKVePzIEID0vkkiStZdY9Gqzapins+
9PLPnwnWcC2biOp6T0vXS9/bRO36O2ox61Btwshbq8OHVQCtBPLaUkfU+P9pCoWMXrR4bgmvCjstgS9xlRYjXJ/jVKUyUjGfKk9pSaX26H3WKEP8ijc2hi2vvhghGZe/Ndt2YuOA5458Sw3A9oTSa/X9+oF72jXhg4YPU8/YoxKUGPdpivfV1k6l6i8Xm1ZHQ+6PGlnISzRVAlVcmKJS+7CX8w5X6VpO9FvqfXuPekm6mNs2uNkAoDx6HLFf++znWbDfECfvyMOK/61MaFadsHpEsys/Pu4SmM+4IwG7mU8z3GospiWE0GTAVCcnuYbagOSn3mM5+BwWDAX//6V7z33ntobW1Fa2sr9u7di9bWVj3OkcgSkZ3+GoXV5ICwOXSi1ZBMYBP3eKotZq6rthPf0+lOuhosQFwt3tOpztxvTRJVV0tbH/Z2jcBuMeIrVyavvsfOsS9Jnn2656gFufaUCvGC7BWgZbpouAJf4qCUWMVLDErFk+uqgU1qOvo8mPInX9HTq82qIdsV+PLhO88EsnEdT7pZX5yZUqp3xKvJCiG1HfXUlqoz4GZKqWSeREYDB7tFX7PzsNF54vQ9PdoDU3Ef6vdEfbfH9oqpe1vOmguTMf4QuzaDCnyy0XkSpVRkZeFYs3OeF3BIeq1Rgyqmdc4inC+l/jwhfXetyeT32ycpkZfXlmZc4S5T9GiH+drXsaBUMs+2qhIrOA4I8gLck8kXA8dUKKUckpedIACTgeRjKyXpeyajQe7H9PKVCi/s5I9SCmBzkfxss0/s7UFHnwcOmylh9T09YcqmowOeaQo6FvhfWu2AOcFzIBZjRL+drHLqTEZ1UGrfvn349a9/jWuuuQZr1qzBWWedFfUfUbhEKqXUVJNjKXBaVt5j1JTaEqq2utLsxJVU+lCzHYMFGd6PqRYYCPH46XOHAACfu2gxqh2pU770OkctyLWnlMcbACtYp2VQigVV3ROJg4HHhybQMzIFi9GADXE8wdRQ5yxCTakVIV7AgZ7kisR8aA96V0iLJR++80xA7+voDYRko9p0Uw9qS20wGjj4Q7ycnpQJ1HbUU6vSgHtEUkqVpeiDmVpKr+dF2FMq8XBWj/ZQXWpDfVkRBAFy/+3xBvH8B30AgI+sS+wLozZVksHzgjzeSpa+BySuttnlnsSkPwSLyYBFGnkD3rB2HgDg8b09aVeTTUYmv182q9SmQo92mK99Hau8t6Q6sUrEbDSgUhp3DaQwOx+Tjc5Tj/lsZgOYk0iqFFm5+l6KYJfevlL5mL4H5G+b9QZCuPd5cV71pcuWolwHUUQq5pUXwWE1IRAS5CAsg6mn1FQ3BcKVpWerr5TqoFRjYyOGhob0OBcix0R2+msjqsmlGmS4pElEpU6dwpoE3jvpVt5TEhRSsx1jeY0DRWYjPFJFHsaf93Shc2gCVSUW/NPFi3N6jlqQa6UUS2u0W4yaVtJiQalk6Xssde/chnIUWTKvgqg0+JsP7aG+TLzPTqdZNUot+fCdZwJ6X0e2OOCwmdIOEpuMBlmp061B0JPajnpYoGTA41Pkm8JSqJMZnQMRZud6KaUUpO/p1R7WyAtRYlDqufZ++II8lswpxqr6xKlirK2rrcA3POmXU1pSLQKuTLDizsqNL69xJFRyqaW5qRY2swHHhybwvgLLB7Vk8vvJflJ5YHKuRzvM174uVeU9xhzpvPpTBCrUeEpxHCer4lL1O+MK98ve10MpNToZkMe188vzKyiVr232D2+ewOlRL+qcNnzm/EWKj60lHMfJflEdMSl8rJ9lQSalyGbnfbOzAp/qJ9Ldd9+Nb3zjG3jllVfgcrkwNjYW9R9RuLBOv9RmQlO9EyYDh0GPL2W6juwplWLlLl3YxH1fTFpclzu9ik/rGypQ57QhkQsVB9FfY31Dhar9soo8QHggNO4L4r6/HQEA3HHFGUnNFLNxjlqQa08ptkKvpUoKAKpKmFIqcVDqtSODAIALl87R5JiJfEliYe0hEd
loD3PLpNV9jVKsUpHP90Ahofd1TNfbLxYt00Pz4X4pNKqKrTAZOAiC6PGSClZ9L1WREYdsOqyzUipJ+p5e9wBL4dsnBWJYWfKPrJuX9F5ItwIfWzAps5tTpoSElVLRk5t26e90K+/Fo8RqwqYzawEAj7dqb3ie6n4GxEWl2N8vEOKxv2cEALAuD4JSerTDfHxO8rwQVkolqLzHqGG+UqmUUio8pYCwQjNZvxPiBUxI1gmpqvqxca8ei7Es/b2qxJrzFNNY9GyzyaiMcz8zRib9+NXLYuX0O69aBpuCyqt6sSKBsins26dSKZVA4TpbUB2UuvLKK/HWW2/hiiuuQHV1NcrLy1FeXo6ysjKUl+e+sgWRPpGdvs1slG+OVL5STE6uh6cUgLiqrRAvoHs4vfQ9o4HD9i2NADCto2V/b9/SCGMalQRjVV3/9epxuCb8aKgqxsfXKy+XrOc5Zoojx9X3tK68x2Dt15UgfSgQ4rH7mAtA5ibnjEQVnGIxGriEOfPZag9ap1ilIvIeiCXX90AhoXdfkq63XyxapocaDRy+dU38CqfUduJjMHByNVElBtwjUywolWOllOwplXg4q9c9EC7EMophH/D2CfG5f92auUk/xyrwqfWUUuInxWDeJH1jXgxHLLR0qCxTrpQbpEqDu/b3al6hzGjg8M0kFYsBYHTKj5a2vqjXDvV54A3wcNhMWFylzGxYT/Roh8n2ych2X9c75sVUIASzkUtpPh2uwKedUgoIB6WSFViIfC/VgnE4Q0D7fixfU/cAfdpXsrEdY3QqgBfa++K+95+vHMOYN4gVtQ58ZN08xcfVAzmIFKFsmvAFZaNytf0sU1Z1D0/lLBsll6gOSr388st4+eWX8dJLL0X9x14jCpfYTl+pisMllyjWJygVT7XVOzqFQEiA2cihThrgqaG5qQ73b10npywwap023L91HZqb6tI6V6bqev3IEB7cfQK//vsxAMDXNy1XbHan9zlmSqSnlB7+EanQKyjFqke6Eiil9naNYMIfQmWxBY0aDehX1TthNHDoG/OiN0Uqx7snRNNHS0w7ylZ7MBkNqJEGkH/a04Xdx1y6V/RpbqrDf35yHWJFB7m+BwoNvfqSEC/gHcmM1GjgMmoPYaWUtlVnjNR2FKOmAh9L38ulp5QgCOH0vSSeUoA+98CZc50wGcRnxkNHDRAEYP2icsxLkYKTvlKKjbVSB6UcNjPmV4j3VEfEhEltmXKlXLi0ClUlVrgn/Pj7oUFN9w0AglSXK3byW+u0Yd2CMoR44PY/tWLnWycBiH3TY5Jqa1GlPW6lr1ygRztMtE8AuLKxJut93bEBMXVvYWVxynEvS9Hq19BTCoCi9D2PT9yn1WRIaQUR9pTSPlCQbtZHtkjUvuwWY9ptdtOZtSiNE2CsddqwZn4ZgryALz7Uij/t6QIg3s+7j7nwP2904nevdwIAvnnNipwvLIXT98LKpsP9HgiC2E8r6asjcdrNmCtd50OzMIVPtU7wkksu0eM8iDwgttNfu6AMf9x9MqXfjVtWSumTvsdUWwd6RrG3awTzyu1yidL55fa0O6Xmpjpc1ViLPZ1uDHi8qHaIEtRMOjk2UD81PIXvPfkBAMBs5BKuMOTiHDOFBS39IR6+IJ916Wx4hV7joFQJU0rFD0qx1L0LllbBoNH1L7IYsaLWgQ9Oj2Fv1wjqVsUPsLb1jOIJKTXkL5/fiKlAKOvtoaWtV54UiSmpR1DntGH7lkZdB70r6kohCIDJwOHuG1dhbpk95/dAIcL6ku8/eQAPvX0KGxrK8fBtG9O+ji1tvdixq12eWD/1/mm8c8KddnvQ2kj/sVaxEti2S5fggqVz8qb/zGfUGHCzNOry4lRBKfF9PZRSQV4Ai4MmS99jaP08feXQgJSmJ+DImDipPdQ/jpa23qT3gGwqr7DSIYOlVVY5lI21VtSW4pR7Cgd7x7BxSSU83oA8AVbrdZIKk9GAD581F797oxOP7+3BlY01mu
6f3c9fumwJNi6uivr9AOD7T7bhobe78N0n2rDnuAt7Tg7LwdUDPWO48O6XdH9WKYW1w91HB/D8a2/j6os2YOPS6oz6pdi23TM8iZ8+dxh/PzyInpEpOeifDY4rqLzHYOl7SpVS8QIZ8ZDT9/xJglIKKu8xwp5S2vdjbD6Tb5X3Iolss79t2YOXT4sm9eneT4f7xzHmDcJq4vBfnzoHI5OBqPv5u08cwJ/2nMJdjx3AW8ddeLvTHbVYYjEa4E1RtTobLK9xgONEL0bXuA+VJda0Tc4ZK+pKcXrUi47eMZy7aHZZDKTlcvjaa69h69atOP/889HTIz4oHnzwQbz++uuanhyRXWKVUmsk1U/b6TH4g4nl2GwSr1f6HhApkx8BAFkamWknbjRw2LikEtetqcfGJZUZDQpa2npx12MHpr0eCIkR/5a23pyfoxYUW0xyZZNcyEvHdFZKJfKUelUyOdcqdY8RViQmDv7e3dIBAPjwWXOxZkFZ1ttDS1svtu1shT8Uvd7cN+rFtp3pt20lsJLeq+Y5cePZ8/PiHihUjAYOm84UB5GDHn9GAaltO1unBS8yaQ9aekoNenxyEJm1mXzpP/OZsFIq9W8wzJRSKdL32HgiWRpNuvgixiXJ0vci0ep5yu6BQEyfODYVSHkPsOvsnvDDm6JkfSSDKlXpLHWEme4e7hcnS7WlNl2qVbGKgy8c7JcVzVowMObFG0fF5++N6+ZN+/2MBg7/en0T/vlyMcX9qf2909R+2XhWqcFo4LChoQJnVwnYoFGgPLJtb7t0Kc5bXAF/kMfPnz+swRkr55iCynsMZnQ+kMTHThAE2WBcqadUsVUMUifrd9SkBOrppco8pfIxfS8S1mab5/EwGTicGp6S0/fVwp7P5y2uwiXLqqfdzz+6YRW+dNkSAKJXX+z97A/xGc2rtKLYapIVbkyFKpucp5lRsXIWm52rDkr93//9HzZt2oSioiK0trbC5xM7ktHRUfzoRz/S/ASJ7BFrJLio0o4yuxn+IJ/QdM0XDMEjrYDqlb4HTJ+4yznYeSJ3DfECduxqTyoR37GrXfd0p2xgMHDyKpRe5XGTwdRounlKxQlKjUz6sb97BABw0RnamJwzZCP/BL5Srx4exGtHhmA2cvj6puWaHlsJydo2e03Pti1XT5pPnoVawAZKna4JTCZZRU6EXu0hUimVaVrwU++fBi+IZeAbFKzWEyLM6yiVUioY4uXJmdL0PT2UUr6IgE5sWrOeZHoPOIvMsEnphgMp0pYiGfKIz6Y5CpVSrAIfW7lvz3AFPxVnzi3FGdUl8Ad5PHtAu8nik/vE+/nsheVYWBn/fuY4DndcuSyhkiYbz6p8guM43HXNSgDAY3u7s2qcrLTyHgBUM6VUkvtgwh+SFZFKVE1ApEIzcdB3XErfUxKU0rPqtKyUytCXMVvYjOE52WvSYq1aXkuxyMtxHO68annK3yYf7udYc/KDsm9fmkqpBObpswHVT/F//dd/xQMPPIDf/OY3MJvDncMFF1yA1tZWTU+OyC6xUlaO4+TqMolUHExVYjRwilcw0mFthGrLFwyhS1pZSGWimC32dLqTDuQFiAP9PZL/SqETXjXKvlJqVKHBrlpY+t7wpH9adbk3j7kgCMAZ1SVxfRsygT3c93ePTjOI5XkBP35WVEndct6inLT3XLftfCrpPROY47CiqsQCQRAl9GrRqz0wpdSEP5SxyuLxvaKXDFNuEMqoLVXmKRX5+6RaHCjRMe2FKaUsRoNmKdVKyPQe4LiwF2YqL8FI1HhKAWIaCCAqpIIhXvY9WaGxyTmD4zjcIN1zj+/t0Wy/j0n7YmbqidjT6U5qRD3TxmGpOGt+Ga5dXQdBAH4ijSOyQTgopSR9T+xzBj2+hIsRbJxpNnJyMDcVJZJSKln1PVVKKZs+Y15fMIReKY13QZ7MZ5Rw4ZJKAGHFkxq8gRDe7mRFgxIv8u7pdCd9buTL/RwOInkgCAI65Aqn6SqlxM8d6v
NkpdJ1PqE6KHXo0CFcfPHF0153Op0YGRnR4pyIHBH2lAp30Kw62N4EKo7I1D09B4ULK+0ol1VbnrAxYIJVs2yTKh9e7Xb5Dgtc6lGJJBXMy0Sp4aVSyqUgV4gXpk2K2YNXa5UUADRUFcNZZIYvyMsPM8YT+3pwsHcMDpsJX748fvU9vcll2/YGQvJqEQWltCOTssN6tQeb2Sirbbsz8JU60u9BW88YTAYOH1qdvAoaEY1ST6nhybDCwJRCoRRWSmm/gMGCUkpT97RCi3uAeemo8ZVSU30PEJXkRWYjfEEeJ1wTESv4+gSlAOD6NfXgOODtTrdcITkTOvrGcLB3DGYjh2tXJfevmW3jMCV8Y9NymI0c/n54UE6B1BOPNyCbli9WoJRibdkf4uWxXSxMke+wmSUPt9QoMTpn49dUlffEY+uTHXDKPQVBEE3D9cw20ZoLl4pBqTePuhBUWW3zvZPD8AZ4VDusWFaTuI0Uyv0sp9v1jqF7eAoeXxBmI6dIKRiPRZV2WE0GTPpD8lx3tqD6SV5bW4ujR49Oe/3111/H4sWLNTkpIjfEM/1bIymlEqUWsVSnSh39pABxBW5NhGor30qosgoiWm2X75TKq985VEppHJSymAzy93JNhKXkgiDg1cOS1HiZtn5SQEzbPhVWJHoDIdwreUFsu3SJLh4gSshl227rGUWQF1BVYs2qUetMZ0Xt9IoxStGzPWjhK8VUFZcur9bV53AmwryO+se8SVdoR6fE5365ArWqrul7CivvaY0W90CdwlTJSGSjc4VBKYOBw3LpXv/g9JhczYml9enB3LIinNcgTliflIpzZMLjksH5ZcurUz4DZ9s4TAkLK4vxyQ0LAQA/fvag7sqLziExi6GqxKrIYsFiMqBcKlrTnyC4EPaTUl6biyk0k/U74yqMzuXsAI2D6yzrY0GFXXHALR84c24pnEVmeHxBvC9ZWyjlVWmR98IzqpJ+50K5n1mQ/+jAONp6RgGIqaupKjomwmQ0YFmNNEbrm10pfKqv2G233YY77rgDb7/9NjiOw+nTp/HQQw/ha1/7GrZt26bHORJZIuwpFe74z5Imyyddk3CNT8/5dkuT98osRPiZauvlQ4NyAC1f5K7rGypQ57QlrLLHQRzws8oShY6slMqBp9SoTkbnQHiwH1mB74RrEj0jU7AYDdig0+8nG/lLqWoA8ODuk+gZmUJtqQ3/eEGDLsdVQi7bdmTqXiEN2PIdWSmVhpGmnu0h0wp8PC/gSSkoRal76pnjsMLAiVXt4nnrMYYnpMp7CiqgykEpHVS1flkpld0KsFrcA3IFPoVBKT7iN1HqKQWE7/UX2vsx4Q/BYjLo7rPGUvgea+3OyB8uxAt4Yp/y+3m2jcOU8uXLl6LEakJbzxh27c88UJgMNal7DJbCl8hXimVxKPWTAiKq7yVN31PuKVWqk1Iq3xbYlWI0cLhwqbhIq9ZX6nVp+4tTZB4Uyv1cX1aEEqsJ/hCPpyUvvcYM1ahMfdXeO7vMzlUHpb71rW/h5ptvxhVXXIHx8XFcfPHF+NznPofPf/7z+PKXv6zHORJZgOcFjPunrxo4i8xYKlXQiKeWCqfvKR8kpQubuDMJck2pFTZzdgejiTAaOGzf0ggA0zpQ9vf2LY0zpvITS/HMpVJKj6BUPLNzlrp39sJy2C3KV+rUEJsmOzoZwH+8LCpS77x6WU7bebK2zdCrbTPlGKXuaUukkabaSWNke4gl074uU6XUW50unB71wmEz4fIV1WntYzZjNhrkgEeyYMkI64OVKKWYqlYXpVRu0ve0eN7XyamSytr6yFRANvRVswjIJjcvtPcDAJbVlKRMucyUa5pqYTUZcGxwAgck5UA67D7mQv+YD84iMy5TcD/PtnGYUipLrPjCJWImyz3PH5IVhnpwbEB55T0G63MSVeBji9CR1iKpKLakVkrJ2SEK0vf08pTKNysSNTCTcjVBqaFxHz44Lap/LliaPPOgUO5ng4GT1e
esn12RYTEJNkZLR81eyKh+MnEch+985ztwu91oa2vDW2+9hcHBQfzwhz/U4/yILDHuD0KQq1tEd9Bhs/ORaZ/LVvoeAKyeJ54HG5iVF1lyXnUhkuamOty/dd00I+xapw33b12H5qbkfgiFRKnsKZVLo3Ptg1JssB8ZlNIzdY+xRmrbnUMTePjtk/juEwcwOhXA8hoHblw3T7fjKiVR2waArRsX6ta291HlPV1YUl0Mk4GDxxvEaRXpQ4zmpjrctXnFtNcz7evYRP2dE27sPuZS3b+zVJ9rV9XlzYJFoVGrwICbVUDNtVLKF5CMzrMclAIyf94rNZVnMD+pMrsZZhVBJaaUYgG8imL9x00OmxlXn1kLAPjVy0fx5L6e9O5nSfV47eo6xWq42TQOU8M/XtiAaocVp9xT+OPuk9h9zJX275IMNZX3GCz9qj+BvxrL4nBYlY/5lHhKsfeUKLAifVQzrQ4bSbjyXmEppQAx/Q4QBQtK5wJMVLCyrlSR4rNQ7udltWJ7Z/3ssurMglKs3957aliX+zRfSXvZ32KxoLEx/mopUXiwTt9iMkwbzK9ZUIZH3+uOq5Ryj2cvKLX72BCMBk6+MTv6Pbjw7pewfUtj3nRMzU11uKqxFns63RjweFHtEKWluY7ka02pjhWVkuEP8pj0i6t8+iilWPqeOAEIhHi8dVyqErJUe5Nzxu7j4bb97cfb5NevbKzOm7YT27b3dLrw0Nun8Lf2fnxn80rNgwB9o16cHvXCwAGr5zk13fdsx2oyYml1CTr6PDh4eiwtvy6mGlxVX4rPXbQ4476upe3/b+/O46Oqr/+Pv2cmk30HQgKibCpEFFmKoqhVRFMqLq3WjVp/Wv2W1lqXti79KkWtFmsXtVVb26oVbe23rVoUUepSRRFUQFZRIIpCQoCQPZlMZu7vj8md7MnMzezzej4efdRMJskn4cy9d84953wq9OBrvurADV/U6uJH31VJXnrAx/fmVo9e2lQpaeBdutC3ktx0faj+B3CbA4kDmeuXE8BsF6s6ZkpFJwFpHhNXba/SK2+t1hknHaeZ4wM7Zgc7U2p/kPOkTJ93G5T75sf7I3LddGih7/d7efNevbzZVz0Q7Ot5+SZfK8zXgnw9J8t1WDAyU1N0w5wjdPO/NuruZVvVOa8SzL/LQKy17/liel8flVJ1FiqlzONOyNr32n+2x2uo2e0JWdX8Z9Xxm5Q6pCBTY4dmaef+Rq3acUBntiei+/OWv3Uv8Ju8sf56Xr6pQi98WNHlsR//c4PuOOcoy6+pPTW+uNhX36of/G29pNC+TmNV0LeXWlpa9Itf/EJz587V9OnTNXXq1C7/Q3zyl8f2csfArFJY/3lNj0ytORC6MMwzpZZvqtCCJWt7/PzK2hYtWLLWf/ESCxx2m2aOG6Jzjh2pmeOGxMyBM5Q6ZkpFtlLKrJKy2YKbLxAoc/eT6vZKqfWf16jB1aaCTKeOGhGeHYv6im1Jeuj1HTEb27eddZRG5meoorZFj739ach/1vr21r0jhuf473oidPzDzi0O0jRvUpx6ZNGgj3Xma+Bgt92Xgjm+/2frXjW42jQyP0NfGp1cM2NCKZAd+A62V0rlBzHovKnVE/I7vdFq3+vMYbfpuDGFmjbU0HFBvFEy/877GlxyB7B71b4gd96TfK+rG//+YY/Hw33dtHxThR56fcegfu4rWyrV2OrRoYWZmnZY8JWyyXAdFiwz+dK90CdU8eDxGvp0v+/NdHCVUr6Y7rNSysJMqSz/TKm+WxX9u+8FkJTKcDr8MRSquVJer9GpfS/+klJS5xa+fQM+1zAMyztZx+rr2bx26b4T+b56l+XX1PJNFfrh/23o8Xgsvt8NtaDP5FdeeaXuvfdeHXbYYTrrrLN0zjnndPkf4lNvQ85NRwzPVmaqQw2uNv9dEFNH+174Zkp5vIYWLd2i3i5nzccWLd2SFKWNsaJjplRkK6Vq/WXcKWE5KXWfKfXWx+YuIcNkD8PP6y
+2TbEa2+lOh2484whJ0kNvbNfBfgYjW9Ex5JzWvXDwDzu3OEhz3S5z3tfg/n1CdXw3W33OmzIyLK/VZBHIAG5/pVQg7XudrilCXS3lr5SKYlLKqiFZqXI6bDKMvitEOvPvvBfgkPNoXTeF6uf+q70V99wpI9nkIgQ8XkN3vbi118+FKh6+ONikVo9XaSl2jQii+rbIHHQ+0EypoAad+6onQ7X7ns1mC/mu03vrW9Ta5pXDbgvq7xVLZrUnlwKZK/VJVYP21rmUlmLX9NHxf10XjmNssr/fDfr28wsvvKBly5bpxBNPDMd6ECX+gX+9JKVSHHYdPTJPq8urtW7XQf9WlVLHoPNw7r63pry637u2hnx3ddeUV2vmuCFhWwc65ERpppS5FXkgd+it8Cel2u9Kv9l+oj1pgIGMVsV7bJ977Eg9+la5tlbU6bevb9dtZ4Wupdsc+s6Q8/CY4N+BL/hKqdomt3bs8w20NXdotSoUr4H9DS79tz2BfB677g1KIAO4a5rNmVIDH4fTUhxKddjV6vGq0dUW0rZrc6ZUpHffCwW73abhuen64mCzKmpbBnxTur/9WmtogNda0Tq3hOLnVtW3+CsqaMUNjUjEg3nTeszQrKBuGprte1X1A8yUCqCiydR5ppRhGL0mNutdwX3fnHSnDja5Q3bda+68NzI/I6g5cbHk+LGFSrHb9NmBJu060KRD+6n4MhNXM8YUJsTMx3C8puL9PcFgBf0qGDlypHJyBjfAC7HHPMjm9nHBaN4N7z5XqjoCg877OlFZfR4Gr2MnkuhUSoVjnpTUMa+jurFVtU1ubfiiRlLHQMdQi/fYttttuuUrvoHXT676rMf8EqvaPF7/337KIJMe6J25K9en+xvV3Brcbkwftv/bjB6S6U/kWhWK18DSD/fI4zU0+ZC8oNpG0FMgA7gPNga32UR2mOZK+dv3nPH5hi6YYefmoPNAZ0pF69wSip/77/V75DV8NyTGDI2/XcliUSTiwcrOe1LnQeeuXoeId+y+F0ylVMcMKPM40df3DWT3Pd/P9z2ve6uWVfHeuif5EnVT298fvrW9/xY+M9F8cpCte7EqHK+peH9PMFhBn8l/+ctf6qabbtJnn30WjvUgSvqrlJI6qhU678DX4vb4LzLD2b5nnrBC9TwMnhknkZ4pZbaNhCsp1VEp1ap3duyX15DGF2WHrbQ6EWL75COGadb4oWr1eHXfK9tC8j0/qqxXi9urnLQUkgxhMiw7TUOyUuU1pI/3BtfCF8rWylC8Bjq37mFwOg/g7muXqY4dUANLSJpvEEN9EyOe2/ekzvO7+q5KM5nte4HOlIrWuSWUr+dgB5yjb5GIBys770ny78LW2ubtdV5TXRADyU1Zqf23DRuGEVT7ntSx+1+ornvjeee9zvxzpT7uu4XP1ebp2DQojDtZR1I4XlOJ8J5gMII+k0+fPl0tLS0aO3ascnJyVFhY2OV/iE8dM6X6qJRqr1bYtrfef4A3q6RS7LagdsUI1owxhSrJS1dfxcA2+VoOZowh/iLFvGMVtUqpAO/QB8tsQz3Y1OpvB5oVptY9KXFi++b2aqnn1+/Rpt21g/5+ZuvesYfmMx8oTGw2W6e5UsG18K373JwnlT/odQz0GpB8lbh9vQa2VzVowxe1cthtmjd5xKDXk+yK2ltpXG1e//G2O3PQeUGglVIBbM9uRTy370kdrZLBVEoFso26FL1zS0Cv5+y+X88f763X5j11SrHb9NVjeD2HSiTiwcrOe5JvPqV5o7G3ChArM6XsdpsyU33Hhd524Gtxe9XWPpcn0GRXqGepfpYAlVJSRyfBOzv2q62PTRs++OygWtxeDctJ05HDE6PbKhyvqUR5T2BV0Empiy++WLt379bdd9+tBx98UL/+9a+7/A/xqd7Vf6VUUW66RuZnyDDkb6sxk1KFWalhHUTpsNu0cJ5vVk33n2J+vHBeaczsxpAMzDipd7VFdOBeuCulzBkpXkNavtm3vf
zJYbyrkyixPWlkns491vcG4p6XtvZZYRGo9WYlDq17YdWxA1/glVKGYfjbuI8Nwb9Pf68BU12LW29+3HtrwLPrvpAknXLEMA0JYmcy9C7d6fC34/c228LV5lFTe7tnfkaAlVJm+17IK6Wiv/veYBSbVWl97DrWWbDte9E6twTyeq5vbutzty5zwPmXjywadGswOkQiHna2zxm0Ut3csQNfz2HnVmZKSf1XaJrDyu02+ZNXAwn1LNVdB3x/r3ivlDrmkHzlpqeorqVNG/q4KflWp/msibJxQTheU4nynsCqoM/k77zzjv7v//5PN910ky6//HJ961vf6vI/xKeBKqUkX9WC1NG6YV4kReLCoWxSiR6eP9Vf7m4qzkvXw/OnqmxSSdjXgA45YdxRqT/+tpEwJaWcDrt/TkpNk1tOh03HjQnvMMFEie0bzzhSqQ673t5+wD8g3iqzEudYhpyHlVkptSWISqlPDzSppsmttBS7JhTnhmQdfb4GctM1aUSu3B5DV/3lfT3X3tZj8noNPbdujyRa90Kpvx34aps63swFPCDYXykV2nZvf/tenM6UCrRSyus1/JvKDM0J/HorWueW/l7PpSW5avV49e0n3tfz63u+ns3HvsaGBSHX179LUW7aoOPhYGOrf9diK3PAivoZdm4mlYK9GWkmpXqrlDJvxGenpQScJAn1LFWzUurQwviem+aw2/zVUn218JlJ6ERp3TOF4xibKO8JrAi652rChAlqbh64/x3xZaCZUpKvauHFDRX+pJRZKRXonbvBKptUojmlxVpTXq2q+hYV5fhKGBM1YxzL0lIcSkuxy9XmVV2zO2yVS93VhXnQueRrSTErssYPy47ILiGJENujCjP1zZmH6U8ry3XPsq1y2m3a1+AK+nepaWr133E9dlT8bxscyya0Dzv/qKKuzx2Kulu3y5cwPHpknlJDWKHS12vAaxj68T826Nl1u3XdM+tV3diqb50wWmvKq/XOjv3aXdOs7FSH5pQOD9lakl1JXro276nrtVLqYKdq1UBba7PTwzVTKr7b9/pL/nVW2+z2txoFO78zWueWvn6ux2voh//3of794R5d98x61TS5Nf/4w7SmvFpvb9+nitoWZac5dNqEorCuL1l1/nf50f99qC9qmnVz2YRBvdH1eA39+0NfMnFIltPSNdPw9hk5VfVdK6XcHq+a3b7kc7CVUuYOfI2tvVVKBTdPqvPPD8VMqdpmt/86s78d6+LFrPHDtGxjpVZu36cfnH54l88daHBp8x7fja8TwzgOI1rCcYxNhPcEVgSdlPr5z3+uG2+8UT/72c909NFHy+ns+oLOzQ3NnVNE1kC770mdd+A7KMPouHMXyRJrh92WkNtgxqOcdKdcDa6IzpWqCXNSavmmCn1xsCPpvrWyXrMWv6aF80rDfnciEWL7mlPH66nVn+mjynpd8sfV/sdL8tID/huarWGh2NkN/RtflK0Uu011LW3aU9uikQEM9O8Ycp4f8vX09hpwyKZfXjBZ+ZlOPfb2p7rjhS361YptanB17BjoMaQ3tlUl9B3ESBru3xWu5w3IGv88qcBfm+GfKRXflVJ761rk9Rp9Jvn2tVel52c6LSWCo3Vu6fX1bLfpNxceq4JMp55Y9ZkW/nuzfvHyti6xwes5vMx/l69OLtHv/7tTK7cf0HlTD7H0vZZvqtCipVv8CewDjW5L10zDcs32va4J2s7Xl9kB7pJnykpz9PgeHd83+JbAUM5SNXcqHpqdGvTvFYvMYedrd9WovsXdJdn39o4DMgzfuIBEHdAdjmNsIrwnCFbQZ7eysjKtWrVKs2fPVlFRkQoKClRQUKD8/HwVFHBXO17VBXDX4KgRuXI6bNrf0KovDjb7S3XN4dBILh3b40ZuB76OXZ9Cn5RavqlCC5asldvTdR5SZW2LFixZq+WbKkL+MxPN6vIDanH3HHQZzN/QTEqFYmc39C8txeGf//FRgC18/tbKCFax2e023X5Wqc
6e7HuT0zkhJUnNbg+v0RAykyX9VUoFcwwO30yp+N59b1h2muw2qc1raH9jz1k6pv31wc2TinV2u00/PfsonXWM+XruGhfNrbyeI+Hkw4dJ8rVWWZkDaV4zdT9OWLlm6qtSyqxKykp1KMUR3Os8u323vMZu5wtJnXbeCzwh5K+UCsE172cJsvOeaVRhpsYMzZLHa2jVjgNdPvdW+zzIk48YFo2lIY4EnZ59/fXXw7EORFm9f6ZU3yGR7nSotCRXH35Rq3Wf1+hA+927IVQzJKWcEPfXB8K8S99fRZ8VHq+hRUu3qLfLMkO+AYOLlm7RnNLihC+ftcr8G/YmmL+hWYkTiiHaGNiEkhxt21uvjyrrNXti/y1wza0efVThG4oejkqp/ngN6b1PD/b7HF6joWEO4K7sZQB3bbPvGJwfRKVUTrgqpcz2vQi0WIdDisOuYTlp2lvnUmVtS59VBPv8Q84T51rLa/h25OoPr+fwmnZYgdKddlXVu/RJVYOOCGJXtFBfM5kzpfZ1G3Tu33nPwjVfdlrfu+9Zad/LDWEb8mfVvhEFhw2J73lSnc0aP1Tl+xu1cvt+nXFUsSTfxigrt7cPOT888Vr3EFpBJ6VOOeWUcKwDURZIpZTkq1748Itardt10D9Tih2PklNuCPvrA1Xb7IvTQHd9CtSa8upeqwJMhnxVA2vKq5OunDZQofgbdt7ZLdJJj2Q1sSRXz6/fE9Cw8017atXmNTQ8N81fTRMpvEYjJ9SVUuZsl/pwJaXitFJK8iUA99a5VFHbomP66KDa3xDZ+Z2RwOs5+tKdDs0YM0RvfrxPb368L6ikVKj//cyE7N5ug87rLLTZmbL6SYZb+b7moPNQXPPuaq+UGpUglVKSL+n05Luf+Xfak6Qd+xpUUdui1BS7vjS6MIqrQzywdCZ/6623NH/+fJ1wwgnavds33O7JJ5/UypUrQ7o4RE7HTKn+D9BTOu3At78x8jOlEDs6diKJTFLKMIyOQechbt/rbceXwTwvGYXib1i+v1G1zaHd2Q39m1DcMex8IOaQ8ymjCiK+rTOv0cgxB3Dv7TUp1V4pFcSNAf9MKdr3eijxz+/qO273tbc0DctJnKQUr+fYcLK5a1qQO+aG+t9vuLn7Xp2rSyuheX3Z387gfelvll1Dp933AhXK7gCzfe+wBEpKzRw3RA67TeX7G/0zs95s343vuDGFEdk0CPEt6DP5P//5T5155pnKyMjQ2rVr5XL5Tpa1tbW6++67Q75AhF+L26PW9juOA1VKmS01W/bU+YegJlJJOQJnJjAj1b7X7Pao1eOL0/wQt+8FOnwxUYc0hkIo/oZm616od3ZD30pLfMm/8v2NanH3nL3Rmb+1MgpVbLxGI6e4PVFS72rrcdOhtr1SqiCIGwNmNUL4Bp3H75ud4n6q0kz7GxJrppTE6zlWnNQ+V8o3D7L/439nof73M5/X7PZ0OU7UNQc/+8lkJpxC1r4Xwjmqu9qTNoclwM57ppx0p6a2XxuYLXu07iEYQV/133XXXXrkkUf06KOPdtl578QTT9TatWtDujhEhnlwttk6Zj/05dBC345YrR6v9rb3fhcGuUUxEoN5Mo/UoHNzyHmK3abM1NC+CZkxplAleenqq/bDJl9Ly4wxlB/3JRR/w44h2vkhXx96NywnTYVZqfIa0sd76/t9rr+1Mgr/PrxGIycrLcXfnt19Nyx/pVQwg87bBw6HvlLKnCkVvwlss1Wyt50OTWZSalgCJaV4PceGI4ZnqygnTS1ur9YOMOOrs1D/+2WkOvzvP/Z2misVyM7gfemvfc/K7nvmNW9Tq0dtnp4bugTK1ebRnvbX+6EJlJSSpFnjO4bnu9o8/qHn5uNAf4I+k2/btk0nn3xyj8fz8vJUU1MTijUhwsyDc3ZqSp9bEptsNluPNyS07yWnUA59DERN+x36vAxnyFuHHHabFs4rlaQeF1nmxwvnlTJwtR/9/Q0l34yJgf6GZiUOO+9Fjs1m08
QSXwvf1n5a+Cpqm1VR2yKH3aajD8mL1PL8eI1GVl8VPDX+mVJBtO+Fq1IqAdr3gqqUykmcay1ez7HBZrNpVnsVy5tBtPCZ/369DTq3+u9nDjvv3PJXZ2GXPFN/lVLmscjK7nvS4K57dx9slmFImamOhEo0S9JJR/hi6e3tB/T+pwfV7PZoaHaaf0wA0J+gz+TFxcXavn17j8dXrlypsWPHhmRRiKy6IHe36DyA2GGTtuyplccb/HayiG9WKqXM7WKfX79bq3YcCCpuasM0T8pUNqlED8+f6n+TYCrOS9fD86eqbFJJWH5uIunrbyj52sT6+xs2t3r0UWV0dnZLdub8rq0VfVdKrW9PGE4ozlFmavBvEEKB12jkmDvw9ZWUKggmKRXu3ffiuH2vpP3v3L0irTP/TKnsxGpl4/UcG04+vKO6JRhlk0p0zMiesx+t/vuZLXxmvEudK5oszJTqJxlebyHZ5XTYldE+F2kwSanP2lv3Di3MjPhsxnA7ZmSectIcqm12a9HSzZKkWeOHDFjwAEgWdt+76qqr9IMf/EB//vOfZbPZtGfPHq1atUo//OEPddttt4VjjQizYMtY3Z6ORILHkC5+dLVK8tK1cF4pFxFJJNiZUss3VWjR0i1d3uQEEzedK6XCpWxSieaUFmtNebWq6n1bdM8YU8jd2iB0/xvKkG78v/XaUlGnlZ/s99+V7W7jbl9yOxo7uyW7iSVmUqrvSql17a170W6t5DUaGX0N4K5pDr59r/NMKcMwQvZGrGOmVPxWSnXe6bC3v43Xa+iAufteAlVKmXg9R9+J433n5M176nSgwRXwjtoHGlza0n4j477zj5EzxT6ofz+zUqpzgtacKWVl0HlH+17PWVn+Cqy04L5vbkaKmt2eQY2tMHfeOzSBhpyb/rN1r1rb3yN+vLdBkvT6tn1avqmC94cYUNBJqZtvvller1ezZ89WU1OTTj75ZKWlpemHP/yhvv/974djjQizYA76yzdV6IFXP+nxeGVtixYsWcvdrSRinswD2R53+aYKLViytkepdzBxY/6cUA85785ht7EF9SB1/xuu+7xGj7/zqe55aauWjpvV610zc2e3Y0flJ9zdw1jn34Gvsr7PpMH6GGqt5DUafr21lRmGoYP+9r1gZkr5LjU9XkMtbq8yQjQTMBHa98w34q42r2qa3CroNg6httmttvaK4iEJOr+T13N0DctJ08SSXG2tqNPK7ft1zrEjA/q6FzdWqM1raNLIXJ0/fdSg1zG8PRFeVddbpZSV9j3fcabX9j1zbEmQ3zcn3am9da5BJaU+S9CkVF/X+bXNbt4fIiBBn8ltNpt+8pOfqLq6Wps2bdK7776rffv26c477wzH+hABgR70PV5Di5Zu6bWH3Hxs0dIttPIlCbPdc6BKqVDFjb99L8xJKYTe908br5y0FG3eU6elG/b0+hzmSUXP+KJsOew21Ta7e51t4/Z4tWF3jSRaK5NFbwO4mzvt1BvMTKnMVIfMPGe9K3QbYyRC+15aikND2hNRvb32zHlSeRlOdiRF2JzcXsH8VhBzpf61drck6bwph4RkDUU55kypzkmp4MaLdNb/oHNrs6pCMUt1V3WjpMTaea+/63wT7w8xkKDPcEuWLFFTU5NSU1NVWlqqGTNmKDs7OxxrQ4QEurvFmvLqfodxGvJdVK0prw7l8hCjzJP5QHeMQhU3HW0jidfCkOiGZKfpO18eJ0n6xcvb/BUOnUVzZ7dkl+50aNywLEnSR5U9W/i2Vdarxe1VXoZTY4ZkRXp5iAKzUqqyU9WC2ULtdNiUFUS1k81m65grFcKNMRJh9z2p89+65w58+8yd93ISs0oKscFsq1/5yX4ZxsCJg537GrT+8xo57DadPXlESNZgxniX9r1BVUoNnJQKti3QP0s1gA6BvvgrpRLoXMr7Q4RC0Gfy66+/XkVFRbrkkku0bNkyeTw931wgvgR6x6DzjhiheB7im5nErBvgTUao4saslL
JyxwzRd8WJYzQ8N01fHGzWk6s+6/K5itpmVdZFb2c39D/s3GytnDwqn4GlScIcwN25UupgU8eNgWBbbHNCPOy8zeP133WP5/Y9qetcqe7Moc9Ds7kZg/D50uhCpaXYVVnXou1VDQM+/7l1viqpkw4fGrKEqdm+t6+3Sikrg87bjzmtbV65PV7/420er5rdni7PCVSgHQJ9MQxDu9oHnR+WQO17vD9EKAR9Jq+oqNDf/vY32Ww2feMb31BJSYm+973v6Z133gnH+hABZsZ/oIO+uTPGQAJ9HuKbmcRsbfOqxd13cjpUcROJQecIn4xUh26Yc4Qk6bevb/cnGaWO1r0jh0dvZ7dk19+w83VUsSUds3rnYJPbf3yvbbI+18+/E1aIKqXMKikpvtv3pE6VUr2277UPOU+wreMRW9KdDs0YUyhJenOAFj7DMPTserN1L7D5U4HorX3P38lhoVIqq1PCqfNcqc6J8eBnSgXWIdCXqnqXXG1eOew2jSzIsPQ9YhHvDxEKQSelUlJSdNZZZ+mpp55SVVWVfv3rX+vTTz/VqaeeqnHjxoVjjQizQCulZowpVEleuvq6P2qT746feWJDYstOTemYE9LPG41QxU1thAadI3y+PvUQHV6UrZomtx5+Y4f/cbMSh3lF0TOhxDfsvLekVMeQ8/wIrgjRlJue4t/+3EyWmEPOCyy0UJsVCfUhqpRq7ZSUivdZS2ZVWn8zpUhKIdxO8rfw7ev3ee9/dlCfVzcrOy1FZ5QWh+znF7VXSjW42tTYvlPnYGZKOR12fxVl52tU878znA45HcEdO8yb91YrpczWvRH56UH/7FjG+0OEwqBeEZmZmTrzzDP1la98RYcffrg+/fTTEC0LkRToTCmH3aaF80olqceBx/x44bxStvJNEnZ7x5yQ/u4adY6b7oKJmzoGnce9FIddN5VNkCQ99na59tT4WoP886QYch41pe2VUuX7G7tUPh5sbNXO/b7BrMdSKZU0bDZbj7Yyc65fXhA775my29/MhbpSyumwxf01R3H7m/HOs3RM++uZKYXIOOnwYZKkd3dW9zr30WQOOC+bVByynTQlX+LanFVXVe9SU6vH36JrZaaU+T0lqbG1Z1Iq2CqpzuuwOlPqswPtQ84LE2eelMT7Q4SGpaRUU1OTnnrqKc2dO1cjR47Ub37zG5133nnavHlzqNeHCKgLYheKskklenj+VH+5uak4L53tPpNQoHeNyiaV6H/P6pmYCiZuapqD34ocsWf2xCLNGFMoV5tXv3plm1Z+ss/fvncM86SipignTQWZTnkN6ZO9HTNF1n9RI0kaOzSLTQaSTPcB3DX+Sqngj8GhnillvmmO99Y9aYCZUuagcyqlEGYTinM0NDtNzW6PPvjsYK/PaXF79GL7DrpfC2Hrnsmslqqqa/FfV6bYbf6qzWCZLXyd2/cC3XG8N4OdKWXOkxqVQPOkTLw/xGAF/Yq86KKL9MILLygzM1Pf+MY3dNttt2nmzJnhWBsiJNCZUqaySSWaU1qsNeXVqqpvUVGOrySTDHjyCeaukXkHzGGXPF5p9NBMvXrDlwOOm1oqpRKCzWbTLV+ZoPMeekf/WLtb/2i/6ypJl/1pjX56dikXL1Fgs9k0sSRX7+w4oK0Vdf6B82br3rG07iWd4m7JkoON1ndA7W8nLCv8O+/FeeueNNBMqfb2vRwSwggvm82mkw4fqmfX7dZbn+zXCeOG9njO6x9Vqa6lTSV56Tp+7JCQr2FYTprK9zdqb71LBVm+mM9JTwl6YwWTmZTqrX0vx8Lw9NxBzpQy2/cOG5J4SSmJ94cYnKDP5g6HQ3//+99VUVGh3/72tySkEkCgM6U6c9htmjluiM45dqRmjhvCASdJBXPXyKyGmTPRN4Ogqs6lQMPG6zVISiWQ3tpUzMcXLFmr5ZsqIrwiSJ124KvsmCu1jtbKpFXSLVkymGpVs1XGaoVBdy534iWlGlxt/ioO0/56Bp0jcjrmSvU+7P
xf7bvunXPsyLDsxDq8S6XU4HdczvFXSnW0I5qJ8Zwgd96TBj9TKhF33uuO94ewKuizudm253DEf8k0fAKdKQV0F8xdI3Nu0DnHjlCK3aamVo8q+0hOdFfvapNhtP9M4jSuebyGFi3d0uvn2v+JtWjpFv8sCUTOxG7Dzr1eQ+vNIfTMk0o6xe0DuP1JqSZfgsTKoPMsf6WUtQqD7vztexbbemJJZmqK/1zauVrKMAwdaGTQOSJn1nhfUmrTnlodaHB1+dzBxla9sa1KUmh33evM3IFvX71Ldc3B3zDvLivNd3wIVfveYHffM5NShyZopRQwGJZuMf33v//VvHnzNH78eI0fP15nn3223nrrrVCvDRHg9Roddw0GceBHcuq4a9T/Cbq+xa2Pq+olSdNGF/hPyDuqGgP6OeZW5OlOu9IT4E1IMltTXt3r7BSTIV+70Jry6sgtCpKkie3Dzj+qrJdhGNq5v1F1LW1Kd9o1oTgnyqtDpJXkmjOlzKSU9R1Q/TOlQjzoPBEqpaTed+CrbXbL7fEl54dk076H8CvKTdeE4hwZhvT2jgNdPvfChj1yewyVluTqyDCdD8yk1N66lo4b5hba7ExZvbQNBzNHt7vBzJSqb3Grur0F+rAhiTXoHAiFoM/mS5Ys0emnn67MzExde+21uvbaa5WRkaHZs2fr6aefDscaEUYNrZ0qUAZx4Edy6pgp1f8JesMXtTIM6ZCCDBXlpGvcsGxJ0o59Df1+ncls3cvP4MI83lXVB1YdF+jzEDrji7LlsNtU0+RWZV2Lv7rxmJH5Skmg7asRmB4zpZoGMVMqPVyDzhMjLnubK7Wvfee9vAxnQgx0R3zoaOHb1+XxZ9tb9742NTxVUlKn9r1616CSR6beZtmZ/52dZiG53mmOqmEEV81tzpMakpXqXxeADkGfzX/2s5/p3nvv1TPPPONPSj3zzDP6+c9/rjvvvDMca0QYmdn+VIc9YS7uEDkdd436r5RaZ7YAtc+lsZqUYp5U/CvKSR/4SUE8D6GT7nRo7FDfHdyPKuo7vW7zo7gqRIuZKNnf4FJrm7fj5oCVmVK9DBwejI6ZUomRrOltBz5z572hVEkhgk46fJgk6a1P9vsTL5/ub9TaXTWy26SzJ48I2882K6Wq6l0dM6UGccM8O9S777Wvpc1rqKX9GBSoRN55DwiFoLMQO3fu1Lx583o8fvbZZ6u8vDwki0LkmLumDWZ3CySvjv76/t9omBUXx7bPpRk3zPfGN9CkVE2z7w59noU3Q4gtM8YUqiQvXX0dbWzyvUGbMaYwkstCuwntLXxbKur8mxOQlEpOhZmpSnXYZRi+dhqzfc/KTCmzUqqxNcTte87EuJnmr5Sqa/Y/tr+BIeeIvBljCpWaYldFbYv/Gs2skpp1+DAV5YbvhlFRbqf2vWbru+SZemvfs7K5kykz1eEf3B3sXKlE33kPGKygz+ajRo3Sq6++2uPx//znPxo1alRIFoXIMQ/ODI+GFYHMlDIMo8eb23FFvkqpnfsCnClFpVTCcNhtWjivVJJ6JKbMjxfOK2XHligxh52v23VQH7XvwnfsKHbeS0Z2u03D83xvErdXNaitffMBK5VSoZ8plVjte913OpSk/e3te8NySEohctKdDs0Y7bspZFZLPbe+vXUvTAPOTWbCq76lzd++mpsx+Pa9zpVSDYNIStlsNv/XDdQh0N2uat/1biLvvAcMRtCvyBtvvFHXXnut1q9frxNOOEGS9Pbbb+vxxx/X/fffH/IFIrw6V0oBwTLvYPU3U+rz6mYdaGxVqsOuo0b4qjDGDfUlpSpqW9Tgahuwv968Q09SKjGUTSrRw/OnatHSLV3aVYrz0rVwXqnKJpVEcXXJbWKx7zX6+rZ98hq+N8tmFQeST0luhj6vbtbW9gSl1c0mQj9TKrHa94p7GXTe0b5HUgqRddLhQ7Vy+3699cl+HXNInj470KTMVIfOOGp4WH9uTlqK0p12tbi92t5epTWYSq
nejjsdlVLWvm9OeopqmtyqHWCWancdO+8x5BzoTdCZiAULFqi4uFi//OUv9fe//12SNHHiRD3zzDM655xzQr5AhFe9a/A920he5h2s/sqY133um0tTOiLX/wYiL9Opodmp2t/QqvJ9jTr6kLx+f05ds/VdnxCbyiaVaE5psdaUV6uqvkVFOb6WPSqkosvcgc/TXhVzaGGGPF6Df5ckZSYkP6rw7Z5qpXVPCudMqcSolCruttOhRKUUomfW4UOll6S3t+9XU3tC58yjhiszNbw3sG02m4py0rWrukk7q3xJqdxB3DTvffe9wd2M971fag66Uor2PaB/ll6R5513ns4777xQrwVRMJjeaiAnfeDtcc3WPXOelGnssGztb6jWjn0NAyalqJRKTA67TTPHDYn2MtDJul0HZbPJvyvr6vKDmrX4NSrYkpTZVma2clo9Bue073TlavOqtc2r1EEmk/ztewk2U6qmya3mVo8yUh3az6BzRMln+5tkt/ler++WV0uS/vvxfi3fVBH288Dw3DTtqm5SvWvwM6Wy03w3QhtdHv9jHbvvWXvfE+gs1c5a27zaU+ObF0f7HtC7xDibwzKzAoVKKViRmx5IpVSNpJ7DkoPZgW8wuz4BCMzyTRX67lNr1X2n68raFi1YslbLN1VEZ2GIGjNZsqN9/p/VSqmstI42u8YQtPAlWvtebnqKMlN9v4tZLWUOOqdSCpG0fFOFvvf0Wnm7nQcONrZG5DzQfefdwcyUykoNffteILNUu9td0yyv4Wt/5vUM9C7gV/rYsWMDet7OnTstLwaRR6UUBsM8qTe42uT1GrJ3a/FpcXu0ZU+tJGnqoV2HJZs78AUy7NxMSjGQHwgPj9fQoqVbZPTyOUO+IfSLlm7RnNJiWvmSiFkp5RnEkHNJSnHYleF0qNntUYOrTQVZg6v+6UhKJca9VZvNpuK8dO3c16iK2maNGZrlH/TMTClESiycB7onbQZz07z7TCnDMPz/bbUtMJBZqt19dsB3nXtoYSY7nQN9CPgV+emnn+qwww7TJZdcoqKionCuCRFkVrjwZh9WmMlMw5AaWtt6XDxs3lMnt8fQ0OxUHVKQ0eVzwVRK1bD7HhBWa8qruwxZ7s6QbwjzmvJqWi6TyPBu27/nW6yUknxvEJvdnpDMlXK5E2v3PcmXANy5r1GVtS0yDEMHGklKIbJi4TzQ/ZgzqKRUt933mt0ef4I923JSKvjd9/xDzgsZcg70JeBX5DPPPKM///nP+tWvfqWvfOUruuKKKzR37lzZ7YlzQZCM6qiUwiCkOx1KTbGrtc2rumZ3j4uH9e2te8eOyu9xd8hMSu3c3zjgIGX/oPNBvCEC0Leq+r7fiFh5HhJDSV7XmwkFg2ihzklL0b56V0h24PNXSlnYCTBWFed27MBX2+yW2+N78zyEmVKIkFg4DxR1q5QazPsTc9B5U6svGWUmxB12mzIsHjvMm/j9ja3obhdDzoEBBZxRuuCCC/TSSy9p+/btmjZtmq6//nqNGjVKN998sz755JNwrhFhxEwpDFZuet+7Kq3b5dt5b0q31j1JGlmQ4U9o7T7Y3O/PqGnyzdagUgoIj+5zPAb7PCSGYTlpXW4YDGauX0crTXC7VvUm0dr3pI5Wyb11Lf4h57npKQkzNwuxLxbOA0W5oUtKdR5m3tja5q9uyklPsdxG1981b18+qyYpBQwk6LP5yJEj9ZOf/ESffPKJnn76aa1evVoTJkzQwYMHw7E+hBkzpTBYuf7++p5vNMyd96Z023lP8t2pGjvUV8rcXwuf2+NVY6uvVSOfpBQQFjPGFKokL119Xabb5HvTPGNMYSSXhShz2G1dKhcG1b6XFvybub74d99LoKSUOVS+orZF++oZco7Ii4XzQOf2vcxUh1Ic1l/jaSl2pbQn1Rtdbf5jj9Wd96T+r3n7YlZKHcrOe0CfLL3SW1patGTJEi1atEirV6/WBRdcoMxMXmjxiJlSGKycPu4aVdW3aH
dNs2w26ZheklJSYHOlOp/4iVMgPBx2mxbOK5WkHm9IzI8XzitlyHkSMpMl0uBuDJhvBEPSvudOrN33pI5KqcraFu1rYJ4UIi8WzgOdk+CD7eKw2Wz+Fr7OSSmrO+/5vja45LphGP6ZUocNYaYU0JegklKrV6/W1VdfreLiYv3qV7/S1772Ne3evVt/+9vflJbGiTMeUSmFweqrv359e5XUkcNz+rwrNXaYWSnV9w585pDznPQU3hADYVQ2qUQPz5/aJQkh+ZISD8+fqrJJJVFaGaKpuFM7zZ6aZv+g4GD5k1IhqZQyZ0olTqWUWSFSUdui/ebOe1RKIcKifR7Iy3DK6TCv9Qyt2nHA8jFH6pwM94TkPU8wM6U8XkPLN1Wq2e2RTVJxLu3vQF8CflUeddRRqqqq0iWXXKL//ve/mjx5cjjXhQhhphQGq6+7Rus6DTnvSyCVUrXsvAdETNmkEs0pLdaa8mpV1beoKMfXqkFCODkt31Sh/3683//xT5du0e/f3KmF80qDfnPafXv2wTDb91IH0doTa8xKqf0NLlXU+uYsDqNSClEQzfPAy5sr5W1PQlXWuXTxo++qJC/d0jFH6poMN+fZ5QyifS/QSqnlmyq0aOkW/26GhqTTfvmG5d8DSHQBvyq3bt2qrKws/eUvf9GTTz7Z5/Oqq6tDsjCEn6vN47/bSFIKVvXVX98x5Dy/z6/178DXX1KqiaQUEEkOuy1s230jfizfVKEFS9aqe41CZW2LFixZG3TVRGhnSiVepVRhVqpSHXa1erzavKdOEjOlED3ROA+E+pgjSVlpvhbfhi7te4OfKdXfcSwcvweQ6AJ+VT722GPhXAeioPMBNZv2PVjkv2vU6e63x2towxe1knrfec9ktu/tb2hVTVNrr0N0zUqpwez6BAAInMdraNHSLT3eVEm+O/42SYuWbtGc0uKAqyfM64xGZkr1ymazqTgvXbuqm7Rxt+/8OTTb+mB5IJ6E45gjqctMqboQzpRqcLXJ4zV6rCVcvweQ6ALORHzrW98K5zoQBZ13oeDACKt6q5T6eG+9mlo9yklL0fj2aqjeZKWlqCQvXRW1Ldqxr1HTDut5AV7T5NuFiEopAIiMNeXV/raT3hjyzT5aU14dcDVFTigHnSfg7nuS/Ekp8/qMQedIFuE45khdN1gw59kN5kZ854RWQ0ub8rrdMA3X7wEkusQ6myMoHfOkqJKCdb31169rH3J+zKg82QdIeA7Uwlfb7Pu+eRncMQaASKiq7/tNlZXnSaGeKZV4lVJSx1wpE0kpJItwHHOkrkmp+paOjXOsSk2xK729bbi3Yefh+j2AREdSKomFYmtUoLedSPzzpEb13bpnGmgHPgadA0BkFeUEtktUoM+TpOy0gWexBCoRZ0pJ6rHjGTOlkCzCccyRurbvhep9j79DoJekVLh+DyDRJdbZHEExD6a5GVRKwboc/8m5443G+vad9/obcm4aaAe+mmZf+x4zpQAgMmaMKVRJXrr6qnO1yVfVM2NMYcDfMzuU7XvuxGzfK+m2ZfwQZkohSYTjmCN1HHcaXW3+Y89gdt+TOiqt6pp7HsvM36MvVn8PINEl1tkcQekoY+XNPqwz2z/r2yuaapvd+qTKl2A6dlT+gF8/UFKqjkopAIgoh92mhfNKJanHm0Tz44XzSoOaR+kfEByCSqlWT2K273WulMpNT0m43w/oSziOOVJHpVR9iNr3pI4OgfpeKqUcdptuP6u0168bzO8BJDrLSanW1lZt27ZNbW2Dv7hAdJgZfmZKYTC6V0pt+KJGknRoYaaGBDAPY1yRr31v14EmudvfaHRW00RSCgAirWxSiR6eP7VHS1lxXrqlLc1DVSnl8Rpye3x7WyVapdSwTi09WWkp8nh728MLSEyhPuZIXXf9DFX7Xm8dAp31lXAazO8BJLqgsxFNTU36/ve/ryeeeEKS9PHHH2vs2LH6/ve/r5EjR+rmm28O+SIRHlRKIRTM9k+zHdQcch
5I654kFeemKzPVoaZWj3ZVN/krp0zmTKl8klIAEFFlk0o0p7RYa8qrVVXfoqIcX9uJlbv8nQede73GgJtg9KW1rePmRSLNlFq+qUK3P7/Z/3FFbYtmLX5NC+eV8iYWSSOUxxxJyk7zVRs2ujyqd3XsOj4Y/g6BXiql2jxeLV7+kSTpO6eM1SlHFIXk9wASXdBn81tuuUUffvih3njjDaWnd2SyTz/9dD3zzDMhXRzCy8zwM1MKg2EmNVvbvGpxezrmSQXQuidJNputY9h5Vc8WPjMplUtSCgAizmG3aea4ITrn2JGaOW7IIN4cdlxrNLZar5ZytXn8/53qSIyk1PJNFVqwZK2q6l1dHq+sbdGCJWu1fFNFlFYGRF6ojjmSlJUa2t33fF/fXinVy0yp//vgC+3Y16iCTKe+e+r4kP0eQKIL+mz+3HPP6be//a1mzZolm63jxXXUUUdpx44dIV0cwquOSimEQE5aisxDQV2Lu2PnvUMH3nnP1DFXqucOfDVmpRSDzgEgbqWl2OV0+E4Wg2nhM3feS7HblJIASSmP19CipVvUW6Oe+diipVto5QMsMCs0a5pa1eL2HTtyB7v7XkbvlVJNrW361YqPJUnfP+3wQf8cIJkEfTbft2+fioqKejze2NjYJUmF2NcxU4qDJqyz223Kbr8TtXl3nQ42uZWaYtfEktyAv0dfw85b3B5/qwYzpQAgftlsto65UoMYdu5ym0PO4z8hJUlryqtVUdvS5+cN+Vr51pRXR25RQIIwjzmVdR2vsay0wW0gkOufKdU1KfWnt8q1r96lUYUZuvT4Qwf1M4BkE/QZffr06XrxxRf9H5uJqD/+8Y+aOXNm6FaGsAtVGStgttb99+N9kqRJI3KVGsQbhr6SUuaQc4fdNugZAACA6DKrFuoHVSnla99LcybGznRV9X0npKw8D0AHc/c9s0oqM9Ux6ArLjplSHcex/Q0u/f7NnZKkH55xJDtnAkEK+l3e3Xffra985SvasmWL2tradP/992vLli1655139N///jcca0SYdMyUogIFg2MmNt/8xJeUCqZ1T+rYgW9HVYMMw/Anu815UnkZTioxASDOZac5JTUPrlKqLbEqpYpy0gd+UhDPA9Ch+w3NUNyIz+mlUurBVz9Rg6tNR4/M07xjRgz6ZwDJJugz+qxZs7R+/Xq1tbXp6KOP1iuvvKKioiKtWrVK06ZNC8caESZUSiFUzFLmne0zoQLdec80ekiWbDZfovRAY6v/8Zom33/TugcA8S8nrWPosFX+SqkESUrNGFOokrx09XXbxSapJM+3cxeA4HRPSoWi6r5jppTvOPbp/kY9tXqXJOmWr0ywvLMokMwsvTLHjRunRx99NNRrQYTVmbuakZTCIHVPbAZbKZXudOiQggx9Xt2sHVUNGpqdJqlrpRQAIL6Zs1xCM1MqMdpjHHabFs4r1YIla2WTugw8N9/aLpxXys5dgAWZqQ7ZbJLR/sIKxeZOHbvv+a5Rf/HKNrV5DX35yGE6YfzQQX9/IBkFfZtp2bJlevnll3s8/vLLL+ull14KyaIQfl6v4b9TyaBzDFbnFtBhOWkakRd8m0FvO/CRlAKAxJHdfr0Rit330pyJUSklSWWTSvTw/Kkq7nbuLM5L18Pzp6psUkmUVgbEN5vNpqzUjhunoegOMd831be0af3nNXpxQ4VsNummsgmD/t5Asgr6jH7zzTfL4/H0eNwwDN18880hWVRvqqurdemllyo3N1f5+fm68sor1dDQ0O/X/OEPf9CXv/xl5ebmymazqaamJmzrizeNrW3yhvCuAZJb551MDivMlJWdq3sbdm4mpfIziVEAiHfZtO/1qWxSiVbedJr+etXxuv+iY/XXq47XyptOIyEFDFLna9RQ3IjPTPV9v5qmVt3yzw2SpK9NOSSoXacBdBX0Gf2TTz5RaWlpj8cnTJig7du3h2RRvbn00ku1efNmrVixQi+88ILefPNNXX311f1+TVNTk8
rKynTrrbeGbV3xyuyDdjpsSk+gu42IvOWbKvTcut3+j9//7KBmLX5NyzdVBPV9+ktKUSkFAPHPrFIISaVUgrTvdeaw2zRz3BCdc+xIzRw3hJY9IAQ6z5Ea7Eyp5ZsqdP4j70iSPIa0tbJekjTtsPxBfV8g2QX9yszLy9POnTs1evToLo9v375dWVlZoVpXF1u3btXy5cv13nvvafr06ZKkBx98UHPnztV9992nESN63+XguuuukyS98cYbYVlXPKvzDzlnVzNYt3xThRYsWavuhVGVtS1asGRtUG0HY4f5jh87O7Xv1TSRlAKARGG+IawPyUwpbqgBGFjnRNRg2vf6uuaVpJ88u0mFWalUNgIWBX1GP+ecc3Tddddpx44d/se2b9+uG2+8UWeffXZIF2datWqV8vPz/QkpSTr99NNlt9u1evXqsPzMRGdeEDLkHFZ5vIYWLd3S68nZfGzR0i3yBNjLZ1ZKfX6wSS1uX3sGlVIAkDhC2r5HlTeAAGR1SUpZu57s75rXFMw1L4Cugs5I3HvvvSorK9OECRN0yCGHSJK++OILnXTSSbrvvvtCvkBJqqysVFFRUZfHUlJSVFhYqMrKypD+LJfLJZfL5f+4rq5OkuR2u+V2u0P6syLJXLv5/9UNLZJ8F4jx/Hsh/LrHjml1ebUqalv6/DpDUkVti1Ztr9JxAWxlnZdmU256iupa2rS9slZHFueopsn3WsxOtROnCEhf8QoMhNgJvwynrzK7rrnV8t+5yeX7OqfdFhP/VsQNrCBuIicrtaPVN8Np7bgR6mveeETMwopA48VS+94777yjFStW6MMPP1RGRoaOOeYYnXzyyUEv8uabb9bixYv7fc7WrVuD/r6Dcc8992jRokU9Hn/llVeUmZkZ0bWEw4oVKyRJ7++zSXKotbFGy5Yti+6iEBfM2DF9sN8XQwN55a3VOrA1sDtHhSkO1cmmf6xYqSlDDH1W4ZBk0/YtG7Ss8kMLq0ay6h6vQKCInfD5+IDvvPF5xT7L1x4bvvB9j6qK3Vq27POQrm8wiBtYQdyEX80+u8zmoPKPt2hZzeagv0c4rnnjFTGLYDQ1NQX0PEu9WzabTWeccYbOOOMMK1/ud+ONN+ryyy/v9zljx45VcXGxqqqqujze1tam6upqFRcXD2oN3d1yyy264YYb/B/X1dVp1KhROuOMM5SbG7+7Krjdbq1YsUJz5syR0+lU9epd0vaPNHpksebOPTbay0MM6x47piHl1frLJ+8P+PVnnHRcwHeN/tuySZ+u26O8Q47Q3FPH6dcfr5QamnTarOP1pdEFln8HJI++4hUYCLETfrnbD+ixjz9QamaO5s49wdL3+PjV7dLnOzV+zGGaO3diiFcYPOIGVhA3kbNm6Va9t9+XwD7xS1N15lHDg/4e4bjmjTfELKwwu84GElBS6oEHHtDVV1+t9PR0PfDAA/0+99prrw3oB0vSsGHDNGzYsAGfN3PmTNXU1OiDDz7QtGnTJEmvvfaavF6vjjvuuIB/XiDS0tKUlpbW43Gn05kQL0Dz92hy+7L4+ZmpCfF7Ify6vwZmji9SSV66Kmtbeu2xt0kqzkvXzPFFAe8gNH54jiTps+pmOZ1O/0ypITkZxCmCkijHbEQesRM++Vm+66uGVo/lv3H75nvKSIutfyfiBlYQN+GXk5Hq/+/8rHRLf+9wXPPGK2IWwQg0VgJKSv3617/WpZdeqvT0dP3617/u83k2my2opFSgJk6cqLKyMl111VV65JFH5Ha7dc011+iiiy7y77y3e/duzZ49W3/5y180Y8YMSb5ZVJWVldq+fbskaePGjcrJydGhhx6qwsLEzGIHqq65Y/c9wAqH3aaF80q1YMla2aQuJ2nzdLxwXmlQJ2dz2PmOfY3yeg1/Uio/kzgFgHhn7nw1uEHn7L4HIHCdd9yzuvteOK55AXQI6IxeXl6uIUOG+P+7r//t3LkzbAt96qmnNGHCBM2ePVtz587VrFmz9Ic//M
H/ebfbrW3btnXpW3zkkUc0ZcoUXXXVVZKkk08+WVOmTNG///3vsK0zXtT5d9/jzT6sK5tUoofnT1VxXnqXx4vz0vXw/KlBb43bkZRqUL2rTeYmJuy+BwDxLzvNdyxvaGmTYVibu+LffY+kFIAAZHTaqXPHvgbLO+SF+poXQIeg0sVut1sTJkzQCy+8oIkTI9vHX1hYqKeffrrPz48ePbrHBc5Pf/pT/fSnPw3zyuJTXYtZKWXtjgFgKptUojmlxVpTXq2q+hYV5aRrxphCS3eLDhuSqRS7TU2tHn28t16S741HunPg4ZIAgNiW3X7N0eY15GrzWjq2u9y+SqlUklIABrB8U4Xuf3W7/+Mb/v6hfvHyNi2cV2opiRTKa14AHYLKSDidTrW09L0dJuJHvVkpRQUKQsBht2nmuCGD/j5Oh12HDsnUzn2NWvvZQUlUSQFAosh0OmSzSYbhuw6xlJTyt+9xswJA35ZvqtCCJWt7zICqrG3RgiVrLVc3heqaF0CHoG8zfe9739PixYvV1mZ9HgCir55KKcSosUN9LXxrd/mSUsyTAoDEYLfblJ06uLlStO8BGIjHa2jR0i29DiU3H1u0dIvlVj4AoRV0RuK9997Tq6++qldeeUVHH320srKyunz+X//6V8gWh/AxB50zUwqxZlxRlv6zVVq7q0YSlVIAkEiy01NU72pTQ4vVpFR7pZSTpBSA3q0pr1ZFbd/dPYakitoWrSmvpuoJiAFBJ6Xy8/P19a9/PRxrQQSZ7XtUSiHWmMPO99W7JJGUAoBEkp02yEopN+17APpXVR/YuJlAnwcgvILOSDz22GPhWAcizBx0zht+xBozKWXKy0iN0koAAKGWNdikFO17AAZQlJM+8JOCeB6A8Ar4jO71erV48WKdeOKJ+tKXvqSbb75Zzc3N4VwbwqS1zauW9juNVEoh1owb1rUlmMQpACQO87qjweW29PUMOgcwkBljClWSl66+9sSzSSrJ8+2cByD6Ak5K/exnP9Ott96q7OxsjRw5Uvfff7++973vhXNtCBNzyLnUUUYPxIr8zFQNyUrt9DFJKQBIFP72PWZKAQgTh92mhfNKJalHYsr8eOG8UjnsfaWtAERSwGf0v/zlL3rooYf08ssv67nnntPSpUv11FNPyev1hnN9CANznlRWqkMpDi7qEHvGDu2olqpubGV3FABIEGZSqt5i+16rv1KK6xcAfSubVKKH509VcV7XFr3ivHQ9PH+qyiaVRGllALoLuExm165dmjt3rv/j008/XTabTXv27NEhhxwSlsUhPMx5Urm0RSEGLd9UoU0Vdf6PH3/nU728uVIL55VyAQEAcS47fbCVUuZMKdr3APSvbFKJ5pQWa015tarqW1SU42vZo0IKiC0BJ6Xa2tqUnt410+x0OuV2W5sJgOhh5z3EquWbKrRgyVp1r4uqrG3RgiVrubMFAHEuJ2S771EpBWBgDrtNM8cNifYyAPQj4KyEYRi6/PLLlZaW5n+spaVF3/nOd5SV1dFq869//Su0K0TI1TW3V0qlUymF2OHxGlq0dEuPhJQkGfLNAFi0dIvmlBZzhwsA4tTgK6WYKQUAQCIJOCn1rW99q8dj8+fPD+liEBlUSiEWrSmvVkVtS5+fNyRV1LZoTXk1d7wAIE5lp/luiFmZKeX1Gmr1sPseAACJJOCsxGOPPRbOdSCCmCmFWFRV33dCysrzAACxZzCVUmZCSqJ9DwCARMEZPQnVUSmFGFSUkz7wk4J4HgAg9gxmppQ5T0oiKQUAQKLgjJ6EmCmFWDRjTKFK8tLV17Qom6SSPN+uKQCA+OSvlLKSlGrfec9htynFwSUsAACJgDN6EuqYKUVSCrHDYbdp4bxSSeqRmDI/XjivlCHnABDHstsrpeottO/5h5xTJQUAQMLgrJ6EOmZK0b6H2FI2qUQPz5+q4ryuLXrFeel6eP5UlU0qidLKAAChkO1v33MH/bVmpRRJKQAAEgdZiSRU356Uol
IKsahsUonmlBZrTXm1qupbVJTja9mjQgoA4l9mqm/XvBa3Vys/2aeZ44YGfHxvcbPzHgAAiYakVBKqa2bQOWKbw27TzHFDor0MAEAILd9UoZ/+e4v/4/l/WqOSvHQtnFcaUCWsv33PSaUUAACJgrN6Eqp3MegcAABEzvJNFVqwZK0q61q6PF5Z26IFS9Zq+aaKAb8H7XsAACQezupJyKyUyqVSCgAAhJnHa2jR0i0yevmc+diipVvk8fb2jA4dg85p3wMAIFGQlEoyhmH4Z0rlZlApBQAAwmtNebUqalv6/LwhqaK2RWvKq/v9Pi43u+8BAJBoOKsnmcZWj8wbkcyUAgAA4VZV33dCKpjn+dv3mCkFAEDC4KyeZOpbfK17KXabMpyUvwMAgPAqykkPyfNo3wMAIPGQlEoyZuteTnqKbLbAtmAGAACwasaYQpXkpauvqw6bpJK8dM0YU9jv9+lISnH5CgBAouCsnmTMSinmSQEAgEhw2G1aOK9UknokpsyPF84rlcPe/80yl5vd9wAASDSc1ZNMXXtSinlSAAAgUsomlejh+VNVnNe1Ra84L10Pz5+qskklA34Ps1IqlaQUAAAJg7N6kvFXSqVTKQUAACKnbFKJVt50mi4/YbQk6bgxBVp502kBJaQkZkoBAJCISEolmc4zpQAAACLJYbdpyqH57f9tH7BlrzP/7ntUSgEAkDA4qycZKqUAAEA05WemSpIONrmD+jqXu71SysnlKwAAiYKzepLpmClFUgoAAERefvtmK7VNrUF9He17AAAkHpJSSabeZe6+R/seAACIvAKrlVK07wEAkHA4qyeZ+mYqpQAAQPTkZ/muQZrdHrW4PQF/XUelFJevAAAkCs7qSabe5bsrmcugcwAAEAU5aSn+Aee1zYFXS3XMlKJ9DwCAREFSKskwUwoAAESTzWbzz5U6GMRcKdr3AABIPJzVk4x/9z1mSgEAgCjJy/QlpWqCmCvVyqBzAAASDkmpJONPSlEpBQAAosQcdl4TVKUUM6UAAEg0nNWTTH2LOVOKpBQAAIgOs30vmEopf1LKyeUrAACJgrN6EvF4peb2IaE5DDoHAABRkt9eKXUwqKSUOVOK9j0AABIFSakk0txp12WSUgAAIFoK/DOlgmjfc9O+BwBAouGsnkRa2pNSmakOpTj4pwcAANGRb2HQOe17AAAkHs7qSaTZN+OceVIAACCqOtr3ghl0TvseAACJhqRUEmn22CTRugcAAKLLXynVbKFSivY9AAASBmf1JOKvlMqgUgoAAERPQXulVKAzpQzDUCtJKQAAEg5n9SRiDjqnUgoAAESTWSkV6O57ZpWUJKU5ad8DACBRkJRKIsyUAgAAscCcKVXb5JZhGAM+v0tSikopAAASBmf1JNLCTCkAABADCtorpVo9XjW1egZ8vjnk3G6TUuy2sK4NAABEDkmpJMJMKQAAEAsynA6lOnyXoYEMO3e5zXlSDtlsJKUAAEgUJKWSCDOlAABALLDZbB1zpRoHHnbu33nPyaUrAACJhDN7EjErpXKYKQUAAKLMTErVBlIp1d6+xzwpAAASC2f2JNLSXimVS6UUAACIMnPY+cGmICqlUth5DwCAREJSKok0tw86Z/c9AAAQbeaw84NNwcyU4tIVAIBEwpk9iXQMOqdSCgAARFd+hq9SqjagSqn29j1mSgEAkFA4syeRjkHnVEoBAIDoys8KolKqvX3P3LEPAAAkBs7sScIwDLWYlVIkpQAAQJSZlVI1QSSlmCkFAEBiISmVJJpaPfLKN1Mqh0HnAAAgysyZUjWBtO+5ad8DACARcWZPEvUuX5mUw25TZip3GQEAQHRZ232PS1cAABIJZ/YkUd8+5TwnLUU2my3KqwEAAMku36yUaqZ9DwCAZEVSKkmYlVK07gEAgFhQkBnMTKn29j0qpQAASCic2ZNEXYvvgo+kFAAAiAX5nWZKeb1Gv891udsrpZgpBQBAQuHMniTq27feyyUpBQAAYk
Behi8p5TU6Krr7QvseAACJiaRUkqhrMdv3nFFeCQAAgJTudCjD6Usy1Q7Qwkf7HgAAiYkze5JoaGGmFAAAiC0F7S18A+3AR6UUAACJiaRUEvB4DX1UWSdJanS1yTPA3AYAAIBIyG8fdj5gUoqZUgAAJCTO7Alu+aYKzVr8ml7YuFeS9PKWKs1a/JqWb6qI8soAAECyM4ed1zb3377X6jErpbh0BQAgkXBmT2DLN1VowZK1qqht6fJ4ZW2LFixZS2IKAABEVYFZKdU4UKWUOVOK9j0AABIJSakE5fEaWrR0i3pr1DMfW7R0C618AAAgavLaK6VqBqiU6pgpxaUrAACJhDN7glpTXt2jQqozQ1JFbYvWlFdHblEAAACdmIPOawLdfY+ZUgAAJBTO7Amqqr7vhJSV5wEAAIRaQaCDztl9DwCAhERSKkEV5aSH9HkAAAChlpcRYKWUm/Y9AAASEWf2BDVjTKFK8tJl6+PzNkkleemaMaYwkssCAADwMyulagaslDIHnXPpCgBAIuHMnqAcdpsWziuVpB6JKfPjhfNK5bD3lbYCAAAIr/xgB507ad8DACCRkJRKYGWTSvTw/Kkqzuvaolecl66H509V2aSSKK0MAABAyjdnSjUGOlOKS1cAABJJSrQXgPAqm1SiOaXFWrW9Sq+8tVpnnHScZo4vokIKAABEnVkpVdfSJo/X6PP6xOWmfQ8AgETEmT0JOOw2HTemUNOGGjpuTCEJKQAAEBPy2wedS1JtPy18tO8BAJCYSEoBAAAgKlIcduWk+wr3D/Yx7NwwDNr3AABIUJzZAQAAEDX+YedNvVdKtXq8/v8mKQUAQGLhzA4AAICoKWgfdl7TR6WUWSUlSWkptO8BAJBISEoBAAAgavIy+q+Ucrl9SSmbTXI6mIsJAEAiISkFAACAqDErpfqaKeVq69h5z2YjKQUAQCIhKQUAAICoKRhgppTZvpfq4LIVAIBEw9kdAAAAUZNnzpRq7qNSqr19L83JPCkAABINSSkAAABEjVkpdbDPSqmO9j0AAJBY4ubsXl1drUsvvVS5ubnKz8/XlVdeqYaGhn6f//3vf19HHnmkMjIydOihh+raa69VbW1tBFcNAACA/uS3J6VqB2jfIykFAEDiiZuz+6WXXqrNmzdrxYoVeuGFF/Tmm2/q6quv7vP5e/bs0Z49e3Tfffdp06ZNevzxx7V8+XJdeeWVEVw1AAAA+pM/4KBzMylF+x4AAIkmJdoLCMTWrVu1fPlyvffee5o+fbok6cEHH9TcuXN13333acSIET2+ZtKkSfrnP//p/3jcuHH62c9+pvnz56utrU0pKXHxqwMAACS0/IwBBp2729v3nHFzLxUAAAQoLjIzq1atUn5+vj8hJUmnn3667Ha7Vq9erfPOOy+g71NbW6vc3Nx+E1Iul0sul8v/cV1dnSTJ7XbL7e79YikemGuP598B0UHsIJ4Qr7CK2ImenFRfsqmmqbXXv3+Ty/dYqsMWc/8+xA2sIG4Qb4hZWBFovMRFUqqyslJFRUVdHktJSVFhYaEqKysD+h779+/XnXfe2W/LnyTdc889WrRoUY/HX3nlFWVmZga+6Bi1YsWKaC8BcYrYQTwhXmEVsRN5TW2SlKLGVo/+/cIydR8d9V6VTZJDdQcPaNmyZVFY4cCIG1hB3CDeELMIRlNTU0DPi2pS6uabb9bixYv7fc7WrVsH/XPq6ur01a9+VaWlpfrpT3/a73NvueUW3XDDDV2+dtSoUTrjjDOUm5s76LVEi9vt1ooVKzRnzhw5nc5oLwdxhNhBPCFeYRWxEz1er6GfvL9CXkOaecpsDctJ6/L52vc+l3Zs1SElxZo799joLLIPxA2sIG4Qb4hZWGF2nQ0kqkmpG2+8UZdffnm/zxk7dqyKi4tVVVXV5fG2tjZVV1eruLi436+vr69XWVmZcnJy9Oyzzw74IkpLS1NaWlqPx51OZ0K8ABPl90DkETuIJ8QrrCJ2oiMvw6mDTW
41uA2N6Pb3b/PaJEnpqSkx+29D3MAK4gbxhphFMAKNlagmpYYNG6Zhw4YN+LyZM2eqpqZGH3zwgaZNmyZJeu211+T1enXcccf1+XV1dXU688wzlZaWpn//+99KT08P2doBAAAQGvmZqTrY5O512HnH7nsMOgcAINHExdl94sSJKisr01VXXaU1a9bo7bff1jXXXKOLLrrIv/Pe7t27NWHCBK1Zs0aSLyF1xhlnqLGxUX/6059UV1enyspKVVZWyuPxRPPXAQAAQCf5mb67qQebWnt8ztXWvvseSSkAABJOXAw6l6SnnnpK11xzjWbPni273a6vf/3reuCBB/yfd7vd2rZtm3+Y1tq1a7V69WpJ0vjx47t8r/Lyco0ePTpiawcAAEDfCjJTJfl24Ouuo1LKEdE1AQCA8IubpFRhYaGefvrpPj8/evRoGYbh//jLX/5yl48BAAAQm/IzfJVSvbXvtZpJKSeVUgAAJBrO7gAAAIiq/PZKqYO9zpSifQ8AgETF2R0AAABRZc6Uqm3upX3PTfseAACJiqQUAAAAoqrAHHTeyO57AAAkE87uAAAAiKo8c9B5b5VSZvseM6UAAEg4nN0BAAAQVWalVG+Dztl9DwCAxEVSCgAAAFFV4B903t9MKS5bAQBINJzdAQAAEFV5Gf1VSrH7HgAAiYqzOwAAAKKqIMtXKeVq86q51dPlc/72PSftewAAJBqSUgAAAIiqrFSHUuw2ST2HnbP7HgAAiYuzOwAAAKLKZrMp35wr1di1hY/2PQAAEhdndwAAAERdxw583Sql3Oy+BwBAoiIpBQAAgKjLN5NSzd0rpcyZUly2AgCQaDi7AwAAIOr87XvdK6Vo3wMAIGFxdgcAAEDU5WeY7XsdlVKGYXQadE77HgAAiYakFAAAAKKuIMtXKdV5ppTbY8gwfP+dSqUUAAAJh7M7AAAAoi6vl0ops3VPon0PAIBExNkdAAAAUVfgnynVOSnl9f83SSkAABIPZ3cAAABEXYG5+16n9j0zKZWaYpfNZovKugAAQPiQlAIAAEDU5ZlJqeZOlVJudt4DACCRcYYHAABA1Jnte71VSrHzHgAAiYmkFAAAAKIuP7Nj0LnRvuVeR1KKS1YAABIRZ3gAAABEnVkp1eY11OBqk9Spfc/JJSsAAImIMzwAAACiLt3pUHp78qmmfQc+2vcAAEhsJKUAAAAQE/IzzLlS3ZNSXLICAJCIOMMDAAAgJphzpQ62Dzt3tbH7HgAAiYwzPAAAAGKCf9h5c3ullLu9UspJ+x4AAImIpBQAAABigjnsvMZfKUX7HgAAiYwzPAAAAGKCv1KqfaZUK+17AAAkNM7wAAAAiAn57ZVSB3tUStG+BwBAIiIpBQAAgJhQ0K1Syp+UcnLJCgBAIuIMDwAAgJiQn9F9phTtewAAJDLO8AAAAIgJ5kypg03ddt+jfQ8AgIREUgoAAAAxwZwpVdvcrX2PSikAABISZ3gAAADEhAJ/pVS39j1mSgEAkJA4wwMAACAmdK6U8ngNdt8DACDBkZQCAABATMjL8FVKGYZU3+LuNFOKS1YAABIRZ3gA6WRgSAAAKHdJREFUAADEhNQUu7LTUiT5hp2z+x4AAImNMzwAAABihlktVdPU2tG+56R9DwCARERSCgAAADGjIMtMSrnZfQ8AgATHGR4AAAAxIz/DN+y8prmV9j0AABIcZ3gAAADEjPxMX6XUwcbOg85p3wMAIBGRlAIAAEDMKMhsr5TqNFMqlUopAAASEmd4AAAAxAyzUqqmmd33AABIdJzhAQAAEDPy2yulDnYadJ7u5JIVAIBExBkeAAAAMSM/w9x9r5WZUgAAJDiSUgAAAIgZBVlmUor2PQAAEh1neAAAAMQMs31vX71LXsP3GJVSAAAkJpJSAAAAiBlm+96+Bpf/sTRmSgEAkJA4wwMAACBmFLRXSnnMMilJqQ4uWQEASESc4QEAABAzcjOcstk6Pk512GW32/r+AgAAELdISgEAAC
BmOOw25aY7/R8z5BwAgMTFWR4AAAAxpSCzU1KKeVIAACQszvIAAACIKXntc6Ukdt4DACCRkZQCAABATOlSKUX7HgAACYuzPAAAAGJKfkZHUiqVpBQAAAmLszwAAABiSn7n9j0n7XsAACQqklIAAACIKfm07wEAkBQ4ywMAACCmFHQZdM7lKgAAiYqzPAAAAGJK10op2vcAAEhUJKUAAAAQU7rOlOJyFQCARMVZHgAAADGlgJlSAAAkBc7yAAAAiCn5GZ1nStG+BwBAoiIpBQAAgJiSn0WlFAAAyYCzPAAAAGJKptMhu8333wcaXfJ4jeguCAAAhAVJKQAAAMSM5ZsqdNK9r8vMQy39sEKzFr+m5ZsqorswAAAQciSlAAAAEBOWb6rQgiVrVVHb0uXxytoWLViylsQUAAAJhqQUAAAAos7jNbRo6Rb11qhnPrZo6RZa+QAASCAkpQAAABB1a8qre1RIdWZIqqht0Zry6sgtCgAAhBVJKQAAAERdVX3fCSkrzwMAALGPpBQAAACirignPaTPAwAAsY+kFAAAAKJuxphCleSly9bH522SSvLSNWNMYSSXBQAAwoikFAAAAKLOYbdp4bxSSeqRmDI/XjivVA57X2krAAAQb0hKAQAAICaUTSrRw/Onqjiva4tecV66Hp4/VWWTSqK0MgAAEA4p0V4AAAAAYCqbVKI5pcVaU16tqvoWFeX4WvaokAIAIPGQlAIAAEBMcdhtmjluSLSXAQAAwoz2PQAAAAAAAEQcSSkAAAAAAABEHEkpAAAAAAAARBxJKQAAAAAAAEQcSSkAAAAAAABEHEkpAAAAAAAARBxJKQAAAAAAAERc3CSlqqurdemllyo3N1f5+fm68sor1dDQ0O/X/M///I/GjRunjIwMDRs2TOecc44++uijCK0YAAAAAAAAfYmbpNSll16qzZs3a8WKFXrhhRf05ptv6uqrr+73a6ZNm6bHHntMW7du1csvvyzDMHTGGWfI4/FEaNUAAAAAAADoTUq0FxCIrVu3avny5Xrvvfc0ffp0SdKDDz6ouXPn6r777tOIESN6/brOSavRo0frrrvu0uTJk/Xpp59q3LhxEVk7AAAAAAAAeoqLpNSqVauUn5/vT0hJ0umnny673a7Vq1frvPPOG/B7NDY26rHHHtOYMWM0atSoPp/ncrnkcrn8H9fV1UmS3G633G73IH6L6DLXHs+/A6KD2EE8IV5hFbEDK4gbWEHcIN4Qs7Ai0HiJi6RUZWWlioqKujyWkpKiwsJCVVZW9vu1Dz30kH784x+rsbFRRx55pFasWKHU1NQ+n3/PPfdo0aJFPR5/5ZVXlJmZae0XiCErVqyI9hIQp4gdxBPiFVYRO7CCuIEVxA3iDTGLYDQ1NQX0vKgmpW6++WYtXry43+ds3bp1UD/j0ksv1Zw5c1RRUaH77rtP3/jGN/T2228rPT291+ffcsstuuGGG/wf19bW6tBDD9XMmTOVk5MzqLVEk9vt1uuvv65TTz1VTqcz2stBHCF2EE+IV1hF7MAK4gZWEDeIN8QsrKivr5ckGYbR7/OimpS68cYbdfnll/f7nLFjx6q4uFhVVVVdHm9ra1N1dbWKi4v7/fq8vDzl5eXp8MMP1/HHH6+CggI9++yzuvjii3t9flpamtLS0vwfm+17Y8aMCeA3AgAAAAAAgORLTuXl5fX5+agmpYYNG6Zhw4YN+LyZM2eqpqZGH3zwgaZNmyZJeu211+T1enXccccF/PMMw5BhGF1mRg1kxIgR+vzzz5WTkyObzRbw18Wauro6jRo1Sp9//rlyc3OjvRzEEWIH8YR4hVXEDqwgbmAFcYN4Q8zCCsMwVF9f3+fGdKa4mCk1ceJElZWV6aqrrtIjjzwit9uta665RhdddJH/F9y9e7dmz56tv/zlL5oxY4Z27typZ555RmeccYaGDRumL774Qj//+c+VkZGhuXPnBvyz7Xa7DjnkkHD9ahGXm5vLgQSWEDuIJ8QrrC
J2YAVxAyuIG8QbYhbB6q9CymSPwDpC4qmnntKECRM0e/ZszZ07V7NmzdIf/vAH/+fdbre2bdvmH6aVnp6ut956S3PnztX48eN14YUXKicnR++8806PoekAAAAAAACIrLiolJKkwsJCPf30031+fvTo0V0GaI0YMULLli2LxNIAAAAAAAAQpLiplMLgpKWlaeHChV2GuAOBIHYQT4hXWEXswAriBlYQN4g3xCzCyWYMtD8fAAAAAAAAEGJUSgEAAAAAACDiSEoBAAAAAAAg4khKAQAAAAAAIOJISgEAAAAAACDiSEoBAACEGfvKAIgkjjkA4gVJKYQEJz4Eq6qqKtpLAICwq66uliTZbLYorwRAMuCYAyDekJTCoDQ0NMjtdstms5GYQsDWrVun4uJivfnmm9FeChAQl8slr9cb7WUgzqxbt05Dhw7V+++/H+2lIM5wzIEVHHMQbzjWQSIphUHYunWrzjvvPD3zzDNqbW0lMYWAfPjhhzrllFN0/fXX6+STT472coABbdmyRZdddpneffddjnEI2Pr163XKKafohhtu0PTp06O9HMQRjjmwgmMO4g3HOphSor0AxKfPPvtMX//617Vjxw41NDQoPT1dZ599tlJTU2UYBiXD6NWmTZt0wgkn6IYbbtCdd94pwzC0fft27dmzR0cccYRKSkqivUSgi/Lycs2bN0/l5eX69NNP9dBDD2nq1Kkc49Av81j3ox/9SIsWLZJhGNq7d6/27t2r0tJSOZ3OaC8RMYpjDqzgmIN4w7EOnVEphaB5PB7985//1Pjx47VmzRrl5+fr7rvv1r///W8qptAnl8ul//3f/1Vzc7PuvPNOSdJZZ52lCy+8UKeeeqrmzZun6667LrqLBDppbW3Vk08+qWnTpmnTpk2qr6/XFVdcobVr1/qPcRzr0F1DQ4N+8IMfyOl0atGiRZKkr3/965o7d66mTJmiOXPm6De/+U10F4mYxDEHVnDMQbzhWIfuSEohaA6HQ6eddpouu+wyTZ48WS+++KKGDx/uT0y5XC4SU+ghNTVVt956qyZOnKjjjjtOc+bMkcPh0C9+8Qtt3LhR8+bN0xtvvKE77rgj2ksFJEl2u10zZszQ+eefr9LSUm3YsEFut9t/4eT1ermjhx5SUlL07W9/WyUlJZo3b57OPPNMtbW16X//93/1zjvv6LDDDtPTTz+tJ554ItpLRYzhmAMrOOYg3nCsQ3c2g8wBLHC73V1KgVtbW3XOOedo7969uvXWW3XOOefI6XTq+eef1znnnBPFlSIWdG7pXLdunS6//HKlpKTo+eef1yGHHCJJam5u1v/8z/9oz549WrZsmVJTU6O5ZECS1NLSovT0dP/HLpdLU6ZMkdPp1J///GdNmzZNhmHozTff1CmnnBLFlSIWmMc6l8ulZcuW6Uc/+pGKior0z3/+09+eXFtbq3nz5mnEiBH629/+FuUVI9ZwzEEwOOYgXnGsQ2ckpRCQ/fv36/PPP1dmZqaKiopUUFAgr9cru92utrY2paSkyOVy6dxzz9XevXt100036fXXX9e///1vvf/++xoxYkS0fwVEgZm8NA8zZgXdxo0bVVFRodmzZyslJUUej0cOh0M/+9nP9K9//Utvv/12lxMVECk1NTU6cOCAcnNzlZWVpczMTP8dO4/Ho5SUFLW0tGjq1KlyOp36/e9/ryeeeEKrVq3SihUrNGzYsGj/CogC8zwodbxJbGlp0WuvvSa73e6vDDWPddddd53Wrl2rN954Q3Y7RevJjGMOrOCYg3jDsQ79ISmFAW3YsEEXXHCBPB6PXC6Xhg8frt/+9rc6/vjj/c8xT46tra0677zztGLFCqWmpurNN9/U1KlTo7h6RMu2bdt0xx136MYbb/THgHnhZG792v3C6IorrpBhGHr00Uf9F1tApGzYsEHf/OY31dTUJK/Xq6lTp+rOO+/UhAkT/El4M9Hqcrn0pS99SVu3bpXT6dTKlSs51iWpTz75RH/605
905ZVX6vDDD5fUcaxrbW2V3W7vcTy7+OKLNXToUD3wwAO0KCQxjjmwgmMO4g3HOgyEVDn6VVlZqXnz5uncc8/VsmXL9OCDD+rwww/XySef3KUE2Kx2SU1N1WGHHaacnBytXr2ag0iS2rlzp+bMmaNly5bprrvu0rp16yR1VErZ7fYuCanq6mrdeuuteuGFF/SjH/2IhBQi7osvvtCZZ56p2bNna8mSJfrBD36g+vp6zZw5U++++67sdrs8Ho+cTqfa2tqUlpamE088UXl5eXr//fc51iWpHTt2aNasWXr44Yf10EMPaceOHZLkf9OXmpra5XjW1NSkn/zkJ3rjjTd0zTXX8OYwiXHMgRUccxBvONYhIAbQj3Xr1hmTJk0yysvL/Y81NTUZP/zhD43U1FTjhRdeMAzDMDwej2EYhvG73/3OsNlsxtq1a6OxXMSApqYm4/LLLzfOP/9843e/+50xe/ZsY968eX3GxPLly41vfetbxiGHHELcIGpeffVVY9q0acaBAwf8j23fvt24+OKLjczMTH9smse6X/7ylxzrklxDQ4NxySWXGBdffLGxaNEiY8qUKcY111xjbN++vdfnP/vss8bFF19slJSUEDfgmIOgccxBPOJYh0BQjoB+1dbWavPmzf6ZQF6vVxkZGbr33nvV3NysSy65RO+//76/fPjCCy9UWVmZxo4dG81lI4oyMjJUVlamuro6XXXVVSouLtZDDz2khQsXatGiRZoyZUqX50+ePFlffPGFFi5cqDFjxkRp1Uh2NTU1Wr9+vdxut/+xcePG6b777pPb7dYFF1yg119/XaNGjZJhGDr11FO1bds2/7EPySctLU2nnHKKMjMzNX/+fBUWFurPf/6zJOm6667TuHHjujx/2rRp2rJli+644w6NHz8+GktGDOGYg2BxzEE84liHQDBTCv3yeDw67bTTVFJSooceekiFhYX+3t/du3frkksu0ezZs3Xbbbf527KA7v7xj3/okUceUWZmpu644w4de+yxcrlcOnDggEaMGOGPKSBaKisrdc4552j27Nm65ZZblJOT4//cu+++q+9///u67rrrdOmll0ZxlYg1LS0tSktL87fEPPDAA3r88cd14okn6vrrr9fYsWPV2tqqmpoaFRUV+YcOAxxzYAXHHMQbjnUIBO8C0S+Hw6ELL7xQn376qR544AHV1dX5kwcjR45Udna2PvroI9lsNpIK6MHj8UiSzj//fP3P//yPmpqadPvtt+u9997T9ddfr+nTp8vlcjHjAFFXXFysU045RS+//LL+9a9/qaWlxf+5448/Xh6PR2+//XYUV4hYlJ6e7t85SJKuvfZaXX755Xr77bf161//Wh999JF+/OMf6+yzz/YPIAYkjjmwhmMO4g3HOgSC9j30yWjfyWPBggXasWOHnn/+eTU3N+snP/mJcnNzJUlDhgxRQUGBPB6P7HY7yQVI6ogdh8Ph303jggsukM1m0x/+8Ad95Stfkcfj0csvv6y0tLRoLxdJzqzU+/nPf65vfOMb+sUvfqHm5mZdfvnlSk9PlySNGTNGI0aMiPJKEWt6O9Zde+21kqQnn3xSy5YtU1VVlV5//XWlpqZGebWIFRxzYBXHHMQTjnUIFO176JNZ8mseUO688069+OKLqqmp0dlnn63PP/9cL7zwgt59910dddRR0V4uYoQZN7W1tcrLy5PUcRElSbNnz9batWv11ltvadKkSdFcKiBJPdobrrjiCn344YcaMmSIzjjjDH300Uf6+9//rjVr1mjChAlRXCliiRk3DQ0Nys7OlqQurcjHH3+8Pv74Y/33v//V0UcfHc2lIsZwzIEVHHMQbzjWIVDUdEKS76TWmXkQ+eyzz3T00UfrjTfe0G233abFixfrjDPO0MaNG5WWlqZVq1aRkEpi/cXNySefrBdeeEGSb6vitrY2/fjHP9Zbb72lN954g4QUos4wDLW1tflj9pRTTtHGjRv1pz/9ST/4wQ80bNgw/e
Mf/9CBAwe0cuVKLpggqWfcnHvuuVq5cqUkyW63y+1266qrrtKaNWt4c4guOObACo45iHUul6vHYxzrEAwqpZJc52qW7sOmP/vsM5144ok666yz9Nvf/lYpKR3dnoZhMNg8iQUaNw8//HCXls5nnnlGEyZM0OTJkyO+ZiS3PXv26L333lNLS4sOP/xwTZ061f+5nTt36stf/rK+8pWv6He/+12XY53L5ZLdbpfT6YzGshFlgcbNI4880uVY9/vf/15Tp07Vl770pWgsGzGgvLxczz33nPbt26eZM2dq3rx5/s9xzEFfAo0bjjmIFVu2bNFVV12lxYsXa9asWV0+x7EOgSIplcS2bNmiE044QT/+8Y916623SuqaYLjiiivkdDq7nPg6t2EhORE3iDcbN27Ueeedp4KCAlVVVUmSHnroIX31q1+VYRgqKyvT0KFDtWTJEuIUflbihmMdJGnDhg366le/qiOOOEItLS1atWqVnnvuOZ199tmSpDPOOENDhw7VU089RbzAz0rccMxBtF1xxRV6/PHHNXbsWD355JOaOXOmvF6vbDabzjzzTA0ZMkRPP/00cYp+kZRKUl988YXOPvtsNTY2av/+/frRj36km2++WVJHC5Y5QBEwETeINzt27NApp5yi+fPn6+abb9bnn3+uhx56SPv27dMTTzyhrKwstba2yul0csEEP+IGVn388ceaPXu2vvnNb+qnP/2pGhsbNX/+fH31q1/Vd7/7XUm+CgE2+UBnxA3i1WOPPaZt27Zp//79eu655/Tss8/qpJNOktRzphTQF3bfS0Jer1f//Oc/NWbMGF1zzTVas2aN7r77bknSzTffTGIBvSJuEG9aW1v1u9/9TieccILuvPNOOZ1O5efn60tf+pJuu+02/0w0dihCZ8QNrGptbdWiRYs0e/Zs3XnnnXI4HEpNTVVGRobeffddffDBBzrmmGN02WWXkVyAH3GDeJaZmam33npL//nPf7R3716df/75euONN/Tkk09q8uTJuvDCC6O9RMQBklJJyG63a+7cuSoqKtKpp56qY489VoZh6J577pHkSzA4nc4es4KQ3IgbxBu73a7x48drzJgxcjqd/jaH0047TXfccYdqa2uVk5PT5WtohQBxA6tSU1N16623as+ePf7qgLvvvlvPPvusLr74YqWnp+v666/X9u3b9eCDD0Z5tYgVxA3i2dSpU/1J1KVLl+qiiy7SlClTlJ2drVWrVkV7eYgTJKWS1OGHH67x48dLkgoKCvTtb39bNputS+WLYRhaunSpZs6cqaFDh0ZzuYgRxA3iSUpKis477zyVlJR0edyscPF4PP5kwkcffaQJEyaQWABxg0E56qij/LsSb9y4Ua+//rpeeOEFlZWV+ZObF110ka655hodeeSRUV4tYgVxg3h1+OGH6+DBg/r44491xBFHyOFwKCUlRV6vV7W1tdFeHuIESakksWfPHu3evVsHDhzQ6aefLrvdLrvdrra2NqWkpGjo0KG64oorJPnuzhiGoQMHDuj+++/Xrl27orx6RAtxg3hjxuz+/ft15plnavjw4ZLkj1mv16u6ujo1NTUpNTVVNptNt9xyixYvXqyDBw8qNzeXBEMSIm5gVV/nSUk6+uij9Ze//KVLgtNut6u0tJSbNkmOuEG86Ryzc+bMkc1mk91uV3NzswoKClRfX69rr71Wb7zxhl577TX98pe/1PHHH6933nlHM2bMiPbyEeNISiWBDRs26KyzzlJOTo4+/vhjHX300br66qs1f/58ZWdn+4fQDRs2TFdeeaUMw9Ctt96q/Px8rVq1qsfdYiQH4gbxpreYveqqq/TNb35T2dnZ/tbS1NRUpaSkKCMjQ4sWLdLvfvc7vfvuu8rLy4v2r4AoIG5g1UDnSUkqLi7u8jWrVq3SIYccwkyyJEbcIN70FbOXXHKJcnNzNW3aNJ100kkqKCjQ0qVLNXXqVD355JNKTU1Vfn5+tJePOMDglwS3f/9+XX
TRRbrkkkv04osvas+ePZowYYIef/xx3Xbbbaqvr5fD4fAPbh06dKi2bNminJwcrVy5UtOnT4/yb4BoIG4Qb/qK2SeeeMIfs+Zd6OzsbA0bNkxXX3217r77br3++uvcxUtSxA2sCuQ8KclfQVdRUaHbbrtNjz76qBYvXtxjLhmSA3GDeNNfzN5+++1qbm7WySefrNNOO00vvviipk6dKsnX8v7kk0/qiCOOiPJvgLhgIKFt3LjRGD16tPHhhx/6H3O5XMbtt99uzJgxw/jJT35iNDc3G4ZhGF6v13jyySeN4cOHGx988EG0lowYQNwg3gQTs5s2bTJsNpuRkZFhrF+/PlpLRgwgbmBVMLHz/vvvG/PnzzfGjBljrFu3LkorRiwgbhBv+ovZ6dOnG4sWLTIMwzDq6+ujtUQkACqlEpw5+8Kc79PW1qbU1FTddtttOuWUU/Tiiy/qvffek+S7K3PiiSdq9erV/iw3khNxg3gTTMyOHDlSN954oz744ANNnjw5mstGlBE3sCqY2CkuLtY3vvENvfrqqzr22GOjuGpEG3GDeNNfzJ566qn65z//qZUrVyo7O1uGYUR5tYhXNoPoSWgul0uzZs1ScXGxnnvuOTkcDv/gVsMwNHnyZE2ZMkVPPPEEW1rDj7hBvAkmZs3np6WlRXnViDbiBlYFEjvHHnus/vKXv0R7qYghxA3iTbDnScAKKqUSmNfrVVpamh577DG9+eabWrBggST5DyI2m01nn322qqqqJInEAiQRN4g/wcSseR+GxAKIG1gVaOzs27cvyitFLCFuEG+CfU8AWEVSKoHZ7XZ5PB5NmjRJTzzxhP7617/qsssu0969e/3PKS8vV0FBgTweTxRXilhC3CDeBBOz5nB+gLiBVZwnYQVxg3hDzCJSaN9LIN3bqMzSyoaGBrlcLq1fv16XXHKJDjvsMBUWFmrIkCF6/vnntWrVKh199NFRXDmiibhBvCFmYQVxA6uIHVhB3CDeELOIFiqlEoCZmTbzi4Zh+A8in376qY444gi99957mj17tjZv3qy5c+dq5MiRKioq0po1aziIJCniBvGGmIUVxA2sInZgBXGDeEPMItqolIpzH3/8sR5++GHt2rVLkydP1je/+U2NGTNGkvT5559r6tSpOuecc/Too4/K6/XK4XD4s+Ber1d2O3nJZETcIN4Qs7CCuIFVxA6sIG4Qb4hZxAKiKI5t3LhRJ5xwgg4ePCiv16uXXnpJf/3rX2UYhtxut55//nnNnz9fjz76qGw2mxwOR5evZ0B1ciJuEG+IWVhB3MAqYgdWEDeIN8QsYgWVUnFq586dOu200zR//nzdddddkqRvf/vbysrK0v333+9/nsfj6XEAQfIibhBviFlYQdzAKmIHVhA3iDfELGIJlVJxyOPxaMWKFZo9e7ZuvPFGf/9vRkaGNm3apFNOOUWXXXaZ3nnnHX+JJUDcIN4Qs7CCuIFVxA6sIG4Qb4hZxBoqpeJUeXm5mpqadNRRR0mS7rjjDt1zzz26/fbb1dLSom3btum9997Tf/7zH39fMEDcIN4Qs7CCuIFVxA6sIG4Qb4hZxJKUaC8A1owZM8aftXa5XFq9erX+8Y9/6Ktf/aokaeXKlfr617+u7du3cyCBH3GDeEPMwgriBlYRO7CCuEG8IWYRS0hKxYk9e/Zo7dq1am1t1WGHHaZp06bJZrPJ4/EoLS1NS5culd1u9++CUFhYqOHDh6uwsDDaS0cUETeIN8QsrCBuYBWxAyuIG8QbYhaxjKRUHNi4caPOPfdcDR06VDt37tTo0aN100036fzzz/cPnjN3PzC35XzyySeVnp6uww47LGrrRnQRN4g3xCysIG5gFbEDK4gbxBtiFrGOQecxbseOHZo7d67OP/98vfLKK1q+fLmOOuoovfTSS/J4PP6yS/NAsmvXLv34xz/W73//e/3xj3/U0KFDo7l8RAlxg3hDzMIK4gZWETuwgrhBvCFmERcMxCyXy2XccM
MNxje+8Q3D5XL5H//Tn/5kDBkyxNi/f3+X57/33nvGd7/7XWPy5MnG+vXrI71cxAjiBvGGmIUVxA2sInZgBXGDeEPMIl7QvhfDvF6vDjnkEE2cOFGpqakyDEM2m00nnHCCsrOz5Xa7uzx/+vTpam5u1v/+7/+qpKQkSqtGtBE3iDfELKwgbmAVsQMriBvEG2IW8YKkVAxLT0/Xueee22PHg/z8fDmdzi4Hkg8++EDTpk3TSSedFOllIsYQN4g3xCysIG5gFbEDK4gbxBtiFvGCmVIxpqKiQmvWrNHy5cvl9Xr9BxGPx+Pv9a2trdXBgwf9X3P77bdrzpw5OnDggL8vGMmFuEG8IWZhBXEDq4gdWEHcIN4Qs4hHVErFkA0bNujss89WWlqa9u7dq5KSEt1+++0688wzVVhY6C+5tNlsstvtys7O1l133aX77rtPb731loYMGRLtXwFRQNwg3hCzsIK4gVXEDqwgbhBviFnErYhOsEKfqqqqjAkTJhi33nqrsWPHDmP37t3GhRdeaEycONFYuHChUVVV5X/u3r17jSlTphgXXnihkZqaarz//vtRXDmiibhBvCFmYQVxA6uIHVhB3CDeELOIZySlYsTmzZuN0aNH9zgo3HTTTcbRRx9t3HvvvUZjY6NhGIaxZcsWw2azGRkZGca6deuisFrECuIG8YaYhRXEDawidmAFcYN4Q8winjFTKka43W61tbWpqalJktTc3CxJ+vnPf65TTz1VDz/8sLZv3y5JKigo0He/+12tXbtWxx57bLSWjBhA3CDeELOwgriBVcQOrCBuEG+IWcQzm2EwzSxWzJgxQ9nZ2XrttdckSS6XS2lpaZKkL33pSxo/frz++te/SpJaWlqUnp4etbUidhA3iDfELKwgbmAVsQMriBvEG2IW8YpKqShpbGxUfX296urq/I/9/ve/1+bNm3XJJZdIktLS0tTW1iZJOvnkk9XY2Oh/LgeR5ETcIN4Qs7CCuIFVxA6sIG4Qb4hZJBKSUlGwZcsWfe1rX9Mpp5yiiRMn6qmnnpIkTZw4Uffff79WrFihCy64QG63W3a775+oqqpKWVlZamtrY6vOJEXcIN4Qs7CCuIFVxA6sIG4Qb4hZJJqUaC8g2WzZskUnn3yyLrvsMk2fPl0ffPCB/t//+38qLS3VlClTdPbZZysrK0vf/e53dcwxx2jChAlKTU3Viy++qHfffVcpKfyTJSPiBvGGmIUVxA2sInZgBXGDeEPMIhExUyqCqqurdfHFF2vChAm6//77/Y+feuqpOvroo/XAAw/4H6uvr9ddd92l6upqpaena8GCBSotLY3GshFlxA3iDTELK4gbWEXswAriBvGGmEWiIlUaQW63WzU1NTr//PMlSV6vV3a7XWPGjFF1dbUkyTAMGYahnJwcLV68uMvzkJyIG8QbYhZWEDewitiBFcQN4g0xi0RFdEbQ8OHDtWTJEp100kmSJI/HI0kaOXKk/0Bhs9lkt9u7DK2z2WyRXyxiBnGDeEPMwgriBlYRO7CCuEG8IWaRqEhKRdjhhx8uyZexdjqdknwZ7aqqKv9z7rnnHv3xj3/075bAgQTEDeINMQsriBtYRezACuIG8YaYRSKifS9K7Ha7DMPwHyTM7Pbtt9+uu+66S+vWrWMQHXogbhBviFlYQdzAKmIHVhA3iDfELBIJlVJRZM6YT0lJ0ahRo3Tffffp3nvv1fvvv6/JkydHeXWIVcQN4g0xCyuIG1hF7MAK4gbxhphFoiB9GkVmRtvpdOrRRx9Vbm6uVq5cqalTp0Z5ZYhlxA3iDTELK4gbWEXswAriBvGGmEWioFIqBpx55pmSpHfeeUfTp0+P8moQL4gbxBtiFlYQN7CK2IEVxA3iDTGLeGczzLo/RFVjY6OysrKivQzEGeIG8YaYhRXEDawidmAFcYN4Q8winpGUAgAAAAAAQMTRvgcAAAAAAICIIykFAAAAAACAiCMpBQAAAAAAgIgjKQUAAAAAAI
CIIykFAAAAAACAiCMpBQAAAAAAgIgjKQUAAAAAAICIIykFAAAQRZdffrlsNptsNpucTqeGDx+uOXPm6M9//rO8Xm/A3+fxxx9Xfn5++BYKAAAQYiSlAAAAoqysrEwVFRX69NNP9dJLL+nUU0/VD37wA5111llqa2uL9vIAAADCgqQUAABAlKWlpam4uFgjR47U1KlTdeutt+r555/XSy+9pMcff1yS9Ktf/UpHH320srKyNGrUKH33u99VQ0ODJOmNN97Q//t//0+1tbX+qquf/vSnkiSXy6Uf/vCHGjlypLKysnTcccfpjTfeiM4vCgAA0AlJKQAAgBh02mmnafLkyfrXv/4lSbLb7XrggQe0efNmPfHEE3rttdf04x//WJJ0wgkn6De/+Y1yc3NVUVGhiooK/fCHP5QkXXPNNVq1apX+9re/acOGDbrgggtUVlamTz75JGq/GwAAgCTZDMMwor0IAACAZHX55ZerpqZGzz33XI/PXXTRRdqwYYO2bNnS43P/+Mc/9J3vfEf79++X5Jspdd1116mmpsb/nF27dmns2LHatWuXRowY4X/89NNP14wZM3T33XeH/PcBAAAIVEq0FwAAAIDeGYYhm80mSfrPf/6je+65Rx999JHq6urU1tamlpYWNTU1KTMzs9ev37hxozwej4444oguj7tcLg0ZMiTs6wcAAOgPSSkAAIAYtXXrVo0ZM0affvqpzjrrLC1YsEA/+9nPVFhYqJUrV+rKK69Ua2trn0mphoYGORwOffDBB3I4HF0+l52dHYlfAQAAoE8kpQAAAGLQa6+9po0bN+r666/XBx98IK/Xq1/+8pey230jQf/+9793eX5qaqo8Hk+Xx6ZMmSKPx6OqqiqddNJJEVs7AABAIEhKAQAARJnL5VJlZaU8Ho/27t2r5cuX65577tFZZ52lyy67TJs2bZLb7daDDz6oefPm6e2339YjjzzS5XuMHj1aDQ0NevXVVzV58mRlZmbqiCOO0KWXXqrLLrtMv/zlLzVlyhTt27dPr776qo455hh99atfjdJvDAAAwO57AAAAUbd8+XKVlJRo9OjRKisr0+uvv64HHnhAzz//vBwOhyZPnqxf/epXWrx4sSZNmqSnnnpK99xzT5fvccIJJ+g73/mOLrzwQg0bNkz33nuvJOmxxx7TZZddphtvvFFHHnmkzj33XL333ns69NBDo/GrAgAA+LH7HgAAAAAAACKOSikAAAAAAABEHEkpAAAAAAAARBxJKQAAAAAAAEQcSSkAAAAAAABEHEkpAAAAAAAARBxJKQAAAAAAAEQcSSkAAAAAAABEHEkpAAAAAAAARBxJKQAAAAAAAEQcSSkAAAAAAABEHEkpAAAAAAAARBxJKQAAAAAAAETc/wfDlv+CgTWq7gAAAABJRU5ErkJggg==",
"text/plain": [
"
"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import matplotlib.pyplot as plt\n",
"\n",
"# Set the 'time_period_start' column as the index for plotting\n",
"ohlcv_df.set_index(\"time_period_start\", inplace=True)\n",
"\n",
"# Plotting the daily movement\n",
"plt.figure(figsize=(12, 6)) # Set the figure size\n",
"plt.plot(ohlcv_df[\"daily_movement\"], marker=\"o\", linestyle=\"-\")\n",
"plt.title(\"Daily Price Movement\")\n",
"plt.xlabel(\"Date\")\n",
"plt.ylabel(\"Price Movement\")\n",
"plt.grid(True)\n",
"plt.xticks(rotation=45) # Rotates the date labels for better readability\n",
"plt.tight_layout() # Adjusts the plot to ensure everything fits without overlapping\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 607
},
"id": "yRipqYxSMu3I",
"outputId": "e2f770fe-3513-44db-efe3-2a36b5b54751"
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAABKUAAAJOCAYAAABm7rQwAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3hU5dbG4d+k94SShN5rqEqxIKiIoNgrWFHRox4QFUVF/fR4LNhAOaJg7yg2VBBpCorSu1TpofcUSM/s74/tniSkzUwmM5Pkua9rriQze/Z+g0mcPFlrvTbDMAxERERERERERES8KMDXCxARERERERERkZpHoZSIiIiIiIiIiHidQikREREREREREfE6hVIiIiIiIiIiIuJ1CqVERERERERERMTrFEqJiIiIiIiIiIjXKZQSERERERERERGvUyglIiIiIiIiIiJep1BKRERERERERES8TqGUiIj4ldtuu41mzZr5ehllatasGbfddpuvl+FTH330ETabjZ07d/p6KaU69b/T/PnzsdlszJ8/32dr8pSdO3dis9n46KOPfL0UKaQ6fY2JiIh4g0IpERHxGCuosG5hYWG0adOG4cOHc/DgQV8vr1zbtm3j7rvvpkWLFoSFhRETE0OvXr0YP348mZmZvl5elWKFJs7c/DnY8oTLL7+ciIgI0tPTSz3mpptuIiQkhKNHj3pxZb6VnJzMPffcQ7NmzQgNDSUhIYErr7ySP//809dLK+K2225z6uu4pgfVIiIi7gjy9QJERKT6+e9//0vz5s3Jysrijz/+YOLEicyYMYN169YRERFR5nPfffdd7Ha7l1Za4KeffuK6664jNDSUW2+9lY4dO5KTk8Mff/zBqFGjWL9+Pe+8847X11VVxcfH8+mnnxa5b+zYsezZs4fXXnut2LHe0KdPHzIzMwkJCfHK9Sw33XQT06ZNY+rUqdx6663FHs/IyOCHH37goosuok6dOl5dm6/8+eefDBw4EIA777yTpKQkDhw4wEcffUTv3r0ZP3489913n49Xabr77rvp16+f4+MdO3bw1FNP8a9//YvevXs77m/ZsiVnnHGGT77GREREqiqFUiIi4nEXX3wx3bt3B8xfOOvUqcO4ceP44YcfuOGGG0p8zsmTJ4mMjCQ4ONibSwXMXzIHDx5M06ZN+fXXX6lfv77jsWHDhrF161Z++uknr6+rKouMjOTmm28uct+XX37J8ePHi91fmGEYZGVlER4e7vE1BQQEEBYW5vHzlufyyy8nOjqayZMnlxhK/fDDD5w8eZKbbrrJ62vzhePHj3PttdcSHh7On3/+ScuWLR2PjRw5kgEDBvDAAw/QrVs3zj77bK+tKysri5CQEAICijYSnHXWWZx11lmOj5cvX85TTz3FWWedVeLXsi++xkRERKoqte+JiEil69u3L2CGP2C2w0RFRbFt2zYGDhxIdHS04xfykmZK2e12xo8fT6dOnQgLCyM+Pp6LLrqI5cuXFznus88+o1u3boSHh1O7dm0GDx7M7t27y13fyy+/zIkTJ3j//feLBFKWVq1acf/995d5ju3bt3PddddRu3ZtIiIiOPPMM0sMst544w06dOhAREQEtWrVonv37kyePLnIMXv37uWOO+4gMTGR0NBQOnTowAcffFDu5wHw4Ycf0rdvXxISEggNDSUpKYmJEycWO65Zs2Zceuml/PHHH/Ts2ZOwsDBatGjBJ598UuzY9evX07dvX8LDw2nUqBHPPfecx6rZrHXMmjWL7t27Ex4ezttvv+3S52IYBs899xyNGjUiIiKC888/n/Xr1xc7rqR5P+eddx4dO3Zkw4YNnH/++URERNCwYUNefvnlYs/ftWsXl19+OZGRkSQkJPDggw8ya9ascmcIhYeHc/XVV/PLL79w6NChYo9PnjyZ6OhoLr/8csD5r6VTnXfeeZx33nnF7j/1e8pqrXz11Vd58803adGiBREREfTv35/du3djGA
bPPvssjRo1Ijw8nCuuuIJjx44VO+/PP/9M7969iYyMJDo6mksuuaTEf/dTvf322xw4cIBXXnmlSCAF5r/Vxx9/jM1m47///S9ghkA2m42PP/642Lmsf//p06c77nPm+8f6Wvjyyy958sknadiwIREREaSlpZW7/rKU9TW2du1azj33XCIiImjVqhXffPMNAL/99htnnHEG4eHhtG3blrlz5xY7b0V+JoiIiPgzVUqJiEil27ZtG0CR1qS8vDwGDBjAOeecw6uvvlpmW9/QoUP56KOPuPjii7nzzjvJy8tjwYIFLF682FGR9fzzz/N///d/XH/99dx5550cPnyYN954gz59+rBq1Sri4uJKPf+0adNo0aKF21UZBw8e5OyzzyYjI4MRI0ZQp04dPv74Yy6//HK++eYbrrrqKsBsTRwxYgTXXnst999/P1lZWaxdu5YlS5Zw4403Os515plnYrPZGD58OPHx8fz8888MHTqUtLQ0HnjggTLXMnHiRDp06MDll19OUFAQ06ZN49///jd2u51hw4YVOXbr1q1ce+21DB06lCFDhvDBBx9w22230a1bNzp06ADAgQMHOP/888nLy+Oxxx4jMjKSd955x6OVTJs3b+aGG27g7rvv5q677qJt27YufS5PPfUUzz33HAMHDmTgwIGsXLmS/v37k5OT49T1jx8/zkUXXcTVV1/N9ddfzzfffMOjjz5Kp06duPjiiwGzkq9v377s37+f+++/n3r16jF58mTmzZvn1DVuuukmPv74Y7766iuGDx/uuP/YsWPMmjWLG264gfDwcKe/ljzh888/Jycnh/vuu49jx47x8ssvc/3119O3b1/mz5/Po48+ytatW3njjTd4+OGHi4Qgn376KUOGDGHAgAG89NJLZGRkMHHiRM455xxWrVpV5mYF06ZNIywsjOuvv77Ex5s3b84555zDr7/+SmZmJt27d6dFixZ89dVXDBkypMixU6ZMoVatWgwYMABw/fvn2WefJSQkhIcffpjs7OxKa7s7fvw4l156KYMHD+a6665j4sSJDB48mM8//5wHHniAe+65hxtvvJFXXnmFa6+9lt27dxMdHe3W5yQiIlKlGCIiIh7y4YcfGoAxd+5c4/Dhw8bu3buNL7/80qhTp44RHh5u7NmzxzAMwxgyZIgBGI899lixcwwZMsRo2rSp4+Nff/3VAIwRI0YUO9ZutxuGYRg7d+40AgMDjeeff77I43/99ZcRFBRU7P7CUlNTDcC44oornP48mzZtagwZMsTx8QMPPGAAxoIFCxz3paenG82bNzeaNWtm5OfnG4ZhGFdccYXRoUOHMs89dOhQo379+saRI0eK3D948GAjNjbWyMjIKPP5JT0+YMAAo0WLFsU+B8D4/fffHfcdOnTICA0NNR566KFin9uSJUuKHBcbG2sAxo4dO8pcT2GXXHJJkf+2hdcxc+ZMtz6XQ4cOGSEhIcYll1zi+HowDMN4/PHHDaDIf6d58+YZgDFv3jzHfeeee64BGJ988onjvuzsbKNevXrGNddc47hv7NixBmB8//33jvsyMzONdu3aFTtnSfLy8oz69esbZ511VpH7J02aZADGrFmzDMNw/mtpx44dBmB8+OGHRT6Xc889t9i1T/2esp4bHx9vpKSkOO4fPXq0ARhdunQxcnNzHfffcMMNRkhIiJGVleVYT1xcnHHXXXcVuc6BAweM2NjYYvefKi4uzujSpUuZx4wYMcIAjLVr1zrWFhwcbBw7dsxxTHZ2thEXF2fccccdjvuc/f6xvhZatGhR7vfUqZYtW1bs395S1tfY5MmTHfdt2rTJAIyAgABj8eLFjvtnzZpV7NwV/ZkgIiLiz9S+JyIiHtevXz/i4+Np3LgxgwcPJioqiqlTp9KwYcMix917773lnuvbb7/FZrPx9NNPF3vMZrMB8N1332G327n++us5cuSI41avXj1at25dZjWL1a5jVSW4Y8aMGfTs2ZNzzjnHcV9UVB
T/+te/2LlzJxs2bAAgLi6OPXv2sGzZshLPYxgG3377LZdddhmGYRT5XAYMGEBqaiorV64scy2FK5hSU1M5cuQI5557Ltu3byc1NbXIsUlJSUUGNcfHx9O2bVu2b99e5HM788wz6dmzZ5HjPDn/qHnz5o5KF1c/l7lz5zqqfayvB8Cl6pGoqKgis4FCQkLo2bNnkX+HmTNn0rBhQ0eLHZizg+666y6nrhEYGMjgwYNZtGhRkd0GJ0+eTGJiIhdccAHg/NeSJ1x33XXExsY6Pj7jjDMAuPnmmwkKCipyf05ODnv37gVgzpw5pKSkcMMNNxT5Gg0MDOSMM84ot3osPT293O8363Hr+3PQoEHk5uby3XffOY6ZPXs2KSkpDBo0CHDv+2fIkCGVMr/sVFFRUQwePNjxcdu2bYmLi6N9+/aOf3co+G9gfe154meCiIiIP1P7noiIeNybb75JmzZtCAoKIjExkbZt2xYbHhwUFESjRo3KPde2bdto0KABtWvXLvWYLVu2YBgGrVu3LvHxsoanx8TEAOYvyu7atWtXkV8sLe3bt3c83rFjRx599FHmzp1Lz549adWqFf379+fGG2+kV69eABw+fJiUlBTeeeedUnf6K2kmUWF//vknTz/9NIsWLSIjI6PIY6mpqUVCiCZNmhR7fq1atTh+/Hi5n5vVYucJzZs3L/F+Zz6XXbt2ART7bx8fH0+tWrWcun6jRo2KBFpg/jusXbvW8fGuXbto2bJlseNatWrl1DXAbOF77bXXmDx5Mo8//jh79uxhwYIFjBgxgsDAQMd1nPla8oRT//tbXxuNGzcu8X7r62LLli1Away4U1nfU6WJjo4u9/vNetwKp7p06UK7du2YMmUKQ4cOBczWvbp16zrW4c73T2lfe55W0tdYbGxsuf/WnviZICIi4s8USomIiMf17NnTMeupNKGhocWCKnfZ7XZsNhs///yz45f7wqKiokp9bkxMDA0aNGDdunUeWUtZ2rdvz+bNm5k+fTozZ87k22+/5a233uKpp57imWeecQwPv/nmm4vNzrF07ty51PNv27aNCy64gHbt2jFu3DgaN25MSEgIM2bM4LXXXis2nLykfyswqzO8qaRKFVc/l4rw1r9Dt27daNeuHV988QWPP/44X3zxBYZheKzqzGazlbjm/Pz8Eo8v7fMu79/D+rf/9NNPqVevXrHjCldZlaR9+/asWrWK7OxsQkNDSzxm7dq1BAcHFwkbBw0axPPPP8+RI0eIjo7mxx9/5IYbbnBcz53vH29USUHF/63d/ZkgIiLi7xRKiYiIX2vZsiWzZs3i2LFjpVZLtWzZEsMwaN68OW3atHH5GpdeeinvvPMOixYtKrL1u7OaNm3K5s2bi92/adMmx+OWyMhIBg0axKBBg8jJyeHqq6/m+eefZ/To0cTHxxMdHU1+fj79+vVzeR3Tpk0jOzubH3/8sUgVjLPDuEvStGlTR2VMYSV9vp7k7Odi/dtu2bKFFi1aOO4/fPhwkYqvimratCkbNmzAMIwiFS9bt2516Tw33XQT//d//8fatWuZPHkyrVu3pkePHkWu4+zX0qlq1apVpOXQYlWTeYq1Y15CQoJbX6eXXnopixYt4uuvvy7SNmnZuXMnCxYsoF+/fkVCo0GDBvHMM8/w7bffkpiYSFpaWpGWuIp+//ij6vg5iYiIFKaZUiIi4teuueYaDMPgmWeeKfaYVU1w9dVXExgYyDPPPFOsUsQwDI4ePVrmNR555BEiIyO58847OXjwYLHHt23bxvjx40t9/sCBA1m6dCmLFi1y3Hfy5EneeecdmjVrRlJSEkCxdYSEhJCUlIRhGOTm5hIYGMg111zDt99+W2Ll1uHDh8v8PKyqi8L/BqmpqXz44YdlPq8sAwcOZPHixSxdurTIOj7//HO3z+kMZz+Xfv36ERwczBtvvFHk2Ndff92j6xkwYAB79+7lxx9/dNyXlZXFu+++69J5rKqop556itWrVxerkn
L2a6kkLVu2ZNOmTUW+TtasWcOff/7p0hrLM2DAAGJiYnjhhRfIzc0t9nh5X6d33303CQkJjBo1qliIlpWVxe23345hGDz11FNFHmvfvj2dOnViypQpTJkyhfr169OnTx/H4xX9/vFH1fFzEhERKUyVUiIi4tfOP/98brnlFv73v/+xZcsWLrroIux2OwsWLOD8889n+PDhtGzZkueee47Ro0ezc+dOrrzySqKjo9mxYwdTp07lX//6Fw8//HCp12jZsiWTJ09m0KBBtG/fnltvvZWOHTuSk5PDwoUL+frrr7nttttKff5jjz3GF198wcUXX8yIESOoXbs2H3/8MTt27ODbb791tCn279+fevXq0atXLxITE9m4cSMTJkzgkksucczOefHFF5k3bx5nnHEGd911F0lJSRw7doyVK1cyd+5cjh07Vuo6+vfvT0hICJdddhl33303J06c4N133yUhIYH9+/e79e//yCOP8Omnn3LRRRdx//33ExkZyTvvvEPTpk2LzFzyNGc/l/j4eB5++GHGjBnDpZdeysCBA1m1ahU///wzdevW9dh67r77biZMmMANN9zA/fffT/369fn8888JCwsDKDYvqDTNmzfn7LPP5ocffgAoFko5+7VUkjvuuINx48YxYMAAhg4dyqFDh5g0aRIdOnRwDAz3hJiYGCZOnMgtt9zC6aefzuDBg4mPjyc5OZmffvqJXr16MWHChFKfX6dOHb755hsuueQSTj/9dO68806SkpI4cOAAH330EVu3bmX8+PGcffbZxZ47aNAgnnrqKcLCwhg6dGixf4+KfP/4q+r4OYmIiFgUSomIiN/78MMP6dy5M++//z6jRo0iNjaW7t27F/ml9bHHHqNNmza89tprjqqqxo0b079//yI7ppXm8ssvZ+3atbzyyiv88MMPTJw4kdDQUDp37szYsWPL3GUtMTGRhQsX8uijj/LGG2+QlZVF586dmTZtGpdcconjuLvvvpvPP/+ccePGceLECRo1asSIESN48skni5xr6dKl/Pe//+W7777jrbfeok6dOnTo0IGXXnqpzM+hbdu2fPPNNzz55JM8/PDD1KtXj3vvvZf4+HjuuOOOcv8NSlK/fn3mzZvHfffdx4svvkidOnW45557aNCggWPgdGVw5XN57rnnCAsLY9KkSY5f3mfPnl3k376ioqKi+PXXX7nvvvsYP348UVFR3HrrrZx99tlcc801jnDKGTfddBMLFy50DLwvzNmvpZK0b9+eTz75hKeeeoqRI0eSlJTEp59+yuTJk5k/f747n3apbrzxRho0aMCLL77IK6+8QnZ2Ng0bNqR3797cfvvt5T6/d+/erF27lhdeeIGvv/6a/fv3Exsby9lnn80HH3xQZPfBwgYNGsSTTz5JRkaGY9e9wiry/eOvquPnJCIiYrEZ3p5mKiIiIlJNvP766zz44IPs2bOHhg0b+no5IiIiIlWKQikRERERJ2RmZhYZvJ2VlcVpp51Gfn4+f//9tw9XJiIiIlI1qX1PRERExAlXX301TZo0oWvXrqSmpvLZZ5+xadOmSh/6LiIiIlJdKZQSERERccKAAQN47733+Pzzz8nPzycpKYkvv/yyxNlGIiIiIlI+te+JiIiIiIiIiIjXlb6vsIiIiIiIiIiISCVRKCUiIiIiIiIiIl5X42ZK5eXlsWrVKhITEwkIUCYnIiIiIiIiIia73c7Bgwc57bTTCAqqcZGJ19W4f+FVq1bRs2dPXy9DRERERERERPzU0qVL6dGjh6+XUe3VuFAqMTERML/A6tev7+PViIiIiIiIiIi/2L9/Pz179nRkB1K5alwoZbXs1a9fn0aNGvl4NSIiIiIiIiLibzTuxzv0rywiIiIiIiIiIl6nUEpERERERERERLxOoZSIiIiIiIiIiHidQikREREREREREfE6hVIiIiIiIiIiIuJ1CqVERERERERERMTrFEqJiIiIiIiIiIjXKZQSERERERERERGvUy
glIiIiIiIiIiJep1BKRERERERERES8TqGUiIiIiIiIiIh4nUIpERERERERERHxOoVSIiIiIiIiIiLidQqlRERERERERETE6xRKiYiIiIiIiIiI1ymUEhERERERERERr1MoJSIiIiIiIiIiXqdQSkREREREREREvE6hlIiIiIiIiIiIeJ1CKREREfGJxYvhqqtg2zZfr8T3xiwYw/AZwzEMw9dLEREREfEahVIiIiLiE++/D99/D5Mn+3olvpWWncYTvz7Bm8veZMuxLb5ejoiIiIjX+DSUGrNgDD3e7UH0mGgSXkngyi+vZPORzWU+57uN39H9ne7EvRhH5AuRdJ3UlU/XfOqlFYuIiIinZGaabw8f9u06fG35vuUYmBVSO47v8PFqRERERLwnyJcX/23XbwzrMYweDXqQZ8/j8V8fp/9n/dnw7w1EhkSW+Jza4bV5ovcTtKvbjpDAEKb/PZ3bf7idhMgEBrQa4OXPQERERNyVk2O+PXLEt+vwtaV7lzre3358uw9XIiIiIuJdPg2lZt48s8jHH13xEQmvJrBi/wr6NO1T4nPOa3ZekY/vP/N+Pl7zMX8k/6FQSkREpApRKGVatm+Z4/0dKaqUEhERkZrDr2ZKpWanAmY1lDMMw+CX7b+w+ejmUkMsERER8U8KpUyFK6UUSomIiEhN4tNKqcLshp0HZj5Ar8a96JjQscxjU7NSaTiuIdn52QTaAnnrkre4sOWFJR6bnZ1Ndna24+P09HSPrltERETck5trvq3JodS+9H3sSdvj+FgzpURERKQm8ZtQathPw1h3aB1/3PFHucdGh0az+p7VnMg5wS/bf2HkrJG0qNWiWGsfwJgxY3jmmWcqYcUiIiJSEaqUgmV7zda9iOAIMnIzVCklIiIiNYpftO8NnzGc6VumM2/IPBrFNCr3+ABbAK1qt6Jrva48dPZDXJt0LWP+GFPisaNHjyY1NdVx27Bhg6eXLyIiIm6wQqnMTMjI8O1afMVq3bu0zaUAHMs8Rlp2mi+XJCIiIuI1Pg2lDMNg+IzhTN00lV9v/ZXmtZq7dR67YSc7L7vEx0JDQ4mJiXHcoqOjK7JkERER8RArlIKaWy1lDTm/oPkF1I2oC6iFT0RERGoOn4ZSw2YM47O1nzH56slEh0Zz4MQBDpw4QGZupuOYW6feyui5ox0fj1kwhjnb5rD9+HY2Ht7I2IVj+XTtp9zc+WZffAoiIiLippoeStkNuyOU6tGgB83jzD/OqYVPREREagqfzpSauHwiAOd9fF6R+z+84kNu63obAMmpyQTYCrKzk7kn+feMf7MnbQ/hQeG0q9uOz676jEEdB3lp1SIiIuIJVT2U+iP5D/Lt+Zzb7Fy3nr/12FZSslIICwqjY0JHmtdqzrJ9y1QpJSIiIjWGT0Mp42mj3GPm3za/yMfP9X2O5/o+V0krEhEREW+pyqHUz1t+5tIvLiXQFkjyg8nUi6rn8jmseVKn1z+d4MBgR6XU9uPbPbpWEREREX/lF4PORUREpOapqqHU+kPrGfTNIOyGnVx7LrO3zXbrPFYo1bNBTwC174mIiEiNo1BKREREfCI3t+D9qhJKHT55mMu+uIz0nHRCA0MBmLVtllvncoRSDf8JpWoplBIREZGaRaGUiIiI+ERVq5TKzsvmqilXsSNlBy1qteDLa78EYNbWWeTb8106V05+DqsPrAagR8MeQEGl1M6UnRhG+SMORERERKo6hVIiIiLiE1UplDIMg7um3cWfu/8kNjSW6TdM55LWlxATGsPRzKOs3L/SpfP9dfAvsvOzqRVWi5a1WgLQJLYJNmxk5GZw6OShyvg0RERERPyKQikRERHxiaoUSr34x4t8uvZTAm2BfH3d17SPb09wYDD9WvQDYObWmS6dr3Drns1mAyA0KJRGMY0AtfCJiIhIzaBQSkRERLwuP9+8Wfw5lPpu43c8/uvjAPzv4v9xYcsLHY9d1PIiAGZuczGU2ld0npTFMVfquEIpER
ERqf4USomIiIjXFR5yDv4bSq3cv5Jbpt4CwH097+PfPf5d5PEBrQYAsHjPYo5nHnf6vKcOObdoBz4RERGpSRRKiYiIiNcVbt0DM5Tyt9nee9P2ctkXl5GRm8GAlgMYN2BcsWOaxDahfd322A07v+z4xanzpmens/HwRgB6NOhR5DFHKKVKKREREakBFEqJiIiI151aKZWbC+npvllLSTJyM7jiyyvYl76PpPgkplw7haCAoBKPvajVPy18Ts6VWrF/BQYGTWKbkBiVWOQxq31ve8r2CqxeREREpGpQKCUiIiJeZ1VKBQZCRIT5vr+08NkNO7dOvZUV+1dQN6Iu026YRmxYbKnHFw6lDCfKvUpr3QNVSomIiEjNolBKREREvM4KpUJCoG5d831/CaU+WPUB3278lpDAEKYOmkqLWi3KPL53k96EBYWxN30vGw5vKPf8jlCqQQmh1D+VUsmpyeTZ89xYvYiIiEjVoVBKREREvM6fQ6lfd/wKwKO9HuWcJueUe3x4cDjnNTsPcK6Fr6xKqQbRDQgJDCHfyGdP2h4XVi0iIiJS9SiUEhEREa+zQqngYP8LpdYfXg+UHBqV5qKW/7TwbSs7lDpw4gC703Zjw8bp9U8v9niALYCmsU0BtfCJiIhI9adQSkRERLzOXyul8ux5bDqyCYAO8R2cfp41V+r3Xb9zMudkqcct27sMgKT4JKJDo0s8xmoX3JGiUEpERESqN4VSIiIi4nX+GkptO7aNnPwcIoIjaBrX1OnntanThqaxTcnJz2H+zvmlHldW655Fw85FRESkplAoJSIiIl7nr6GU1bqXFJ9EgM35l0k2m81RLTVr26xSj1u6z4lQ6p9h56qUEhERkepOoZSIiIh4XW6u+dbvQqlDZijlSuuexQqlSht2bhiGa5VSCqVERESqnBdfBJsNHnig4L6sLBg2DOrUgagouOYaOHiw6POSk+GSSyAiAhISYNQoyDtlI9758+H00yE0FFq1go8+Kn79N9+EZs0gLAzOOAOWLi36uDNr8SaFUiIiIuJ1/l4p5U4o1bd5X4ICgthybAvbjm0r9vjWY1tJyUohNDCUTgmdSj2PVSm1/fh2l9cgIiIivrNsGbz9NnTuXPT+Bx+EadPg66/ht99g3z64+uqCx/PzzUAqJwcWLoSPPzYDp6eeKjhmxw7zmPPPh9WrzdDrzjthVqEC7SlTYORIePppWLkSunSBAQPg0CHn1+JtCqVERETE6/w1lFp3aB0AHRJcD6ViQmM4u/HZQMktfMv2mUPOT6t/GsGBwaWex6qUOnDiAJm5mS6vQ0RERLzvxAm46SZ4912oVavg/tRUeP99GDcO+vaFbt3gww/N8GnxYvOY2bNhwwb47DPo2hUuvhiefdaserJeM02aBM2bw9ix0L49DB8O114Lr71WcK1x4+Cuu+D22yEpyXxORAR88IHza/E2hVIiIiLidf4YSuXm5/L30b8B6JjQ0a1zXNSy9LlSjta9BqW37gHUDq9NdIi5M9/OlJ1urUNEREQqJj09nbS0NMctOzu7zOOHDTMrmfr1K3r/ihXm2ILC97drB02awKJF5seLFkGnTpCYWHDMgAGQlgbr1xccc+q5BwwoOEdOjnmtwscEBJgfW8c4sxZvUyglIiIiXmeFUsHBBaHU0aNgt/tuTVuObSHXnkt0SDSNYxq7dQ5rrtQv238hJz+nyGPOzJMCc2i6hp2LiIj4VlJSErGxsY7bmDFjSj32yy/NdrmSDjlwwPwjXFxc0fsTE83HrGMKB1LW49ZjZR2TlgaZmeYf9/LzSz6m8DnKW4u3BfnmsiIiIlKTFa6UqlPHfN9uh5QUqF3bN2uyhpwnxSdhs9ncOkeXel1IjEzk4MmD/Jn8J+c3Px8wq7BW7l8JlB9KAbSo1YK1B9ey47hCKREREV/YsGEDDRs2dHwcGhpa4nG7d8P998OcOeZwcXGNKqVERETE6wqHUiEhEBNjfuzLFr6KDDm3BNgCGN
BqAFB0F76/Dv1Fdn42cWFxtKrdqtzzaAc+ERER34qOjiYmJsZxKy2UWrHCHCR++ukQFGTefvsN/vc/8/3ERPN1T0pK0ecdPAj16pnv16tXfAc86+PyjomJgfBws/I8MLDkYwqfo7y1eJtCKREREfG6wqEU+MdcKUco5caQ88IGtPwnlNpWEEot22sOOe/RoIdTVVgKpURERKqGCy6Av/4yd8Szbt27m0PPrfeDg+GXXwqes3kzJCfDWWeZH591lnmOwrvkzZljBk5JSQXHFD6HdYx1jpAQc3B54WPsdvNj65hu3cpfi7epfU9ERES8LjfXfFs4lNq+3ceh1KGKV0oBXNjiQmzYWHtwLfvS99EguoHT86QsjplSat8TERHxa9HR0PGU/VEiI83xBNb9Q4fCyJHmiIKYGLjvPjMEOvNM8/H+/c3w6ZZb4OWXzflOTz5pDk+3CrTuuQcmTIBHHoE77oBff4WvvoKffiq47siRMGSIGYT17Amvvw4nT5q78QHExpa/Fm9TKCUiIiJe52+VUjn5OWw5tgWoeKVUfGQ83Rt0Z9m+ZczeNpvbut7G0n0uhlL/VEptP769QmsRERER33vtNXMnvGuugexsc9e8t94qeDwwEKZPh3vvNQOiyEgzXPrvfwuOad7cDKAefBDGj4dGjeC998xzWQYNgsOH4amnzGCra1eYObPo8PPy1uJtCqVERETE6/wtlPr76N/k2fOICY2hYXTD8p9QjotaXcSyfcuYuXUm17S/xlGF1aNBD6ee3yyuGQCp2akczzxOrfBaFV6TiIiIeMf8+UU/DguDN980b6Vp2hRmzCj7vOedB6tWlX3M8OHmrTTOrMWbNFNKREREvM7fQqnCrXvu7rxXmDVXava22SzbtwwDg8YxjakfXd+p50eGRJIQmQBorpSIiIhUXwqlRERExOusUCo42Hzr81DKAzvvFXZGozOIDY3leNZxJi6fCECPhs5VSVkcw841V0pERESqKYVSIiIi4nV+VynloZ33LEEBQVzY8kIAvtnwDQA9Gzg3T8riGHauSikRERGpphRKiYiIiNf5XSjloZ33Cruo5UVFPnZ2yLmlRVwLQJVSIiIiUn0plBIRERGvy8013/pDKJWdl83WY1sBz1VKAQxoVbAdjg0b3Rp0c+n5qpQSERGR6k6hlIiIiHidP1VKbT66mXwjn7iwOOpHOTeI3BmNYho5Kq/ax7cnJjTGpec7ZkoplBIREZFqSqGUiIiIeF1podTx45CX5921rDu0DvDcznuFDWw9EICzGp3l8nOtSqmdKTuxG3aPrktERETEHwT5egEiIiJS85waStWqBTYbGAYcOwYJCd5bS2XMk7I82edJYkNjuf20211+buOYxgTYAsjKy+LAiQM0iG7g8fWJiIiI+JIqpURERMTrrFAqONh8GxRkBlPg/RY+T++8V1hMaAxP9HnCrUApODCYxjGNAQ07FxERkepJoZSIiIh43amVUuC7uVKOUKoSKqUqSsPORUREpDpTKCUiIiJe5y+hVGZuJtuObQMqp1KqohzDzlUpJSIiItWQQikRERHxOn8JpTYd2YSBQe3w2iRGJnrvwk7SDnwiIiJSnSmUEhEREa/LzTXf+jqUKty65+md9zyhRa0WgEIpERERqZ4USomIiIjX+UulVGXuvOcJjplSat8TERGRakihlIiIiHid34RSlbjznidY7Xu703aTm5/r49WIiIiIeJZCKREREfE6vwul/LRSql5UPcKCwrAbdnan7fb1ckREREQ8SqGUiIiIeJ0VSgUHF9zn7VAqIzfD0Rbnr5VSNpuNZnHNANh+fLtvFyMiIiLiYQqlRERExOv8oVJq4+GNGBjUjahLQmSCdy7qBscOfJorJSIiItWMQikRERHxOn8Ipfy9dc/iCKW0A5+IiIhUMz4NpcYsGEOPd3sQPSaahFcSuPLLK9l8ZHOZz3l3xbv0/rA3tV6qRa2XatHvk34s3bvUSysWERERTy
grlEpPh+zsyl+DtfNex4SOlX+xCnDswKdQSkRERKoZn4ZSv+36jWE9hrF46GLm3DKHXHsu/T/rz8mck6U+Z/6u+dzQ8QbmDZnHoqGLaBzbmP6f9mdv2l4vrlxEREQqIvefjeQKh1KxsRAYaL5/9Gjlr6HKVUqpfU9ERESqmSBfXnzmzTOLfPzRFR+R8GoCK/avoE/TPiU+5/OrPy/y8XuXvce3G77llx2/cGuXWyttrSIiIuIZhlFypVRAANSpA4cOmS18DRpU7jocoZSfDjm3tKjVAlCllIiIiFQ/fjVTKjU7FYDa4bWdfk5Gbga59lyXniMiIiK+k59vBlNQNJQC782VOpFzgp0pO4EqUCn1T/veoZOHyqwmFxEREalq/CaUsht2Hpj5AL0a93JptsOjcx+lQXQD+rXoV+Lj2dnZpKWlOW7p6emeWrKIiIi4waqSAggOLvqYt0KpDYc3AJAYmUidiDqVe7EKiguLIy4sDsARpImIiIhUB34TSg37aRjrDq3jy2u/dPo5L/7xIl+u+5Kpg6YSFhRW4jFjxowhNjbWcUtKSvLUkkVERMQNhUMpX1VKWUPO/b11z6Id+ERERKQ68otQaviM4UzfMp15Q+bRKKaRU895deGrvPjHi8y+ZTadEzuXetzo0aNJTU113DZs2OCpZYuIiIgb/KFSqqoMObdYLXzbj2/38UpEREREPMeng84Nw+C+n+9j6qapzB8y3/GCqzwv//kyzy94nlk3z6J7g+5lHhsaGkpoaKjj47S0tAqtWURERCrGCqWCg8FmK/qYQqmSaQc+ERERqY58GkoNmzGMyX9N5ofBPxAdGs2BEwcAiA2NJTw4HIBbp95Kw+iGjOk3BoCX/niJp+Y/xeSrJ9MsrpnjOVEhUUSFRPnmExERERGn5eaab09t3QO175VG7XsiIiJSHfk0lJq4fCIA5318XpH7P7ziQ27rehsAyanJBNgCijwnJz+Ha7++tshznj73af5z3n8qcbUiIiLiCVallK9CqbTsNHan7QaqUKVULYVSIiIiUv34tn3vaaPcY+bfNr/Ixzsf2Fk5ixERERGv8HUoZe28Vz+qPrXCa1XehTyocPueYRjYTu17FBEREamC/GLQuYiIiNQcvg6lqlrrHkCzuGYApOekcyzzmG8XIyIiIuIhCqVERETEqwoPOj+VV0KpKjbkHCA8OJz6UfUBtfCJiIhI9aFQSkRERLzKmUqpzEzIyKic61fFUAoKzZXSDnwiIiJSTSiUEhEREa8qK5SKiiq4v7Kqpapi+x5oBz4RERGpfhRKiYiIiFeVFUrZbJXbwpeSlcLe9L0AJMUnef4ClajwsHMRERGR6kChlIiIiHhVbq75tqRQCio3lLJ23msY3ZC4sDjPX6AS1YuqB8CRzEocuCUiIiLiRQqlRERExKvKqpSCyg2lqmrrHuAI0VKyUny6DhERERFPUSglIiIiXuXTUKqKDjkHhVIiIiJS/SiUEhEREa+yQqng4JIfr8xQat2hdYBCKRERERF/oFBKREREvMovKqXUviciIiLicwqlRERExKt8FUqlZqVy4MQBoOrtvAdFQynDMHy7GBEREREPUCglIiIiXuWrUGp32m4AaoXVIiY0xrMn9wIrlMqz55GRm+HbxYiIiIh4gEIpERER8arcXPOt10OpVDOUahzb2LMn9pKI4AiCAoIAtfCJiIhI9aBQSkRERLzK15VSjWOqZihls9k0V0pERESqFYVSIiIi4lWuhFKeHJ20J20PAI1iGnnupF6mUEpERESqE4VSIiIi4lXlhVJ16phvc3MhPd1z163qlVKgUEpERESqF4VSIiIi4lVWKBUcXPLjERHmDTzbwlfVZ0qBQikRERGpXhRKiYiIiFeVVykFlTNXSpVSIiIiIv5FoZSIiIh4lS9CKcMwqsdMqdA4QKGUiIiIVA8KpURERMSrfBFKHc86TkZuBlC1Q6nYsFhAoZSIiIhUDwqlRERExKtyc8233g
ylrHlSdSPqEh4c7pmT+oDa90RERKQ6USglIiIiXuWLSqnq0LoHhUKp7BSfrkNERETEExRKiYiIiFf5IpSqDkPOQZVSIiIiUr0olBIRERGv8kkolapQSkRERMTfKJQSERERr7JCqeDg0o/xePteutm+1zi2eoRSqVmpvl2IiIiIiAcolBIRERGv8mWlVLWZKaVKKREREakGFEqJiIiIV2mmlPsKh1KGYfh2MSIiIiIVpFBKREREvMqVUOroUbDbK3Y9wzAcu+9Vl/a9XHsumXmZvl2MiIiISAUplBIRERGvys0135YVStWpY7612yElpWLXO5p5lKy8LAAaRjes2Ml8LDI4kkBbIKAWPhEREan6FEqJiIiIVzlTKRUSAjEx5vuHD1fsetY8qYTIBEKDQit2Mh+z2WyaKyUiIiLVhkIpERER8SpnQinw3Fyp6jJPyqJQSkRERKoLhVIiIiLiVVYoFRxc9nGeCqWqyzwpi0IpERERqS4USomIiIhXeb1S6p/2vUbRjSp2Ij+hUEpERESqC4VSIiIi4lU+a99TpZSIiIiIX1EoJSIiIl7l7VDK0b6nmVIiIiIifkWhlIiIiHiNYUBenvm+tyulGsWofU9ERETEnyiUEhEREa/JzS143xuhlN2wa9C5iIiIiJ9SKCUiIiJeY7XugXdCqcMnD5OTn4MNGw2jG7p/Ij+iUEpERESqC4VSIiIi4jXeDqWsKql6UfUIDgx2/0R+JDY0FlAoJSIiIlWfQikRERHxGiuUstkgMLDsYz0RSlW3eVKgSikRERGpPhRKiYiIiNc4u/MeFIRSKSlFZ1G5YneqGUpVl3lSoFBKREREqg+FUiIiIuI1roRStWqZFVUAx465dz3HkPMYhVIiIiIi/kahlIiIiHiNK6FUUJAZTIH7LXzVvX3PMAzfLkZERESkAhRKiYiIiNdYbXjOhFJQ8blSVihVHSulcu25ZOZl+nYxIiIiIhWgUEpERES8xpVKKfBAKFUNZ0pFhUQRYDNfwqVmpfp4NSIiIiLuUyglIiIiXuPNUMpu2NmbvheoXu17NptNc6VERESkWvBpKDVmwRh6vNuD6DHRJLySwJVfXsnmI5vLfM76Q+u55qtraPZ6M2zP2Hh98eveWayIiIhUmBVKBQc7d3xFQqmDJw6SZ88jwBZAg+gGrp/AjymUEhERkerAp6HUb7t+Y1iPYSweupg5t8wh155L/8/6czLnZKnPycjNoEVcC17s9yL1oup5cbUiIiJSUd6slLLmSdWPqk9QQJDrJ/BjCqVERESkOvDpK7SZN88s8vFHV3xEwqsJrNi/gj5N+5T4nB4Ne9CjYQ8AHpv7WKWvUURERDzHm6HUnrQ9QPWaJ2VRKCUiIiLVgV/92TA12xzWWTu8tsfOmZ2dTXZ2tuPj9PR0j51bREREXOPVSql/hpxXp3lSFoVSIiIiUh34zaBzu2HngZkP0KtxLzomdPTYeceMGUNsbKzjlpSU5LFzi4iIiGtyc8233mzfaxxTDSulQuMAhVIiIiJStflNKDXsp2GsO7SOL6/90qPnHT16NKmpqY7bhg0bPHp+ERERcZ5P2veqYyilSikRERGpBvyifW/4jOFM3zKd32/73eMl9qGhoYSGhjo+TktL8+j5RURExHm+GHReWe17P/0EmZlw7bWVcvoyKZQSERGR6sCnoZRhGNz3831M3TSV+UPm07xWc18uR0RERCqZq6FUXJz59sQJyM+HwEDnr2XNlKqMQed5eXDddWY74oABEB3t8UuUyRFKZad498IiIiIiHuTTUGrYjGFM/msyPwz+gejQaA6cOABAbGgs4cHhANw69VYaRjdkTL8xAOTk57Dh8AbH+3vT9rL6wGqiQqJoVbuVbz4RERERcYoVSgUHO3d8eHjB+1lZEBnp3PPy7fnsS98HVE77XmameQNIT/dhKKVKKREREanCfBpKTVw+EYDzPj6vyP0fXvEht3W9DYDk1GQCbAWjr/al7+O0t0
9zfPzqold5ddGrnNv0XObfNr+SVywiIiIV4WqlVFhYwfuuhFIHThwg38gn0BZIvah6ri3SCRkZBe8X2uTXaxRKiYiISHXg2/a9p41yjzk1aGoW18yp54mIiIj/cTWUCgoyb3l5BZVJzrDmSTWIbkBggAs9f04qvJasLI+fvlwKpURERKQ68Jvd90RERKT6czWUgoIWPpdCqUqcJ3XqWnxRKRUbFgsolBIREZGqTaGUiIiIeE1urvnWnVDKlYqkPWl7gMqZJwX+VSllGKogFxERkapJoZSIiIh4jTuVUtZcKXfa9xrFNHL+SS7wl1AqJz+HrDwfLEBERETEAxRKiYiIiNd4rX3vn1DKG5VSvmjfiwqJcmwEoxY+ERERqaoUSomIiIjXWKFUcLDzz6lQ+54XZkr5olIqwBZAbKjmSomIiEjVplBKREREvMZr7Xup3quU8kUoBdqBT0RERKo+hVIiIiLiNd5o38uz57H/xH6g8mZKZWQUvO+L9j1QKCUiIiJVn0IpERER8ZqKhFLOViTtS9+H3bATHBBMYlSiawt0kj9VSqVmp/pmASIiIiIVpFBKREREvMYb7XvWPKmGMQ0dw8A9zdeDzkGVUiIiIlL1KZQSERERr8nNNd9WZvueNU+qslr3Tl2LryulFEqJiIj4zsSJ0LkzxMSYt7POgp9/Lng8KwuGDYM6dSAqCq65Bg4eLHqO5GS45BKIiICEBBg1CvLyih4zfz6cfjqEhkKrVvDRR8XX8uab0KyZ+Qe9M86ApUuLPu7MWrxNoZSIiIh4jTdmSu1Oq9wh56euRaGUiIhIzdWoEbz4IqxYAcuXQ9++cMUVsH69+fiDD8K0afD11/Dbb7BvH1x9dcHz8/PNQConBxYuhI8/NgOnp54qOGbHDvOY88+H1avhgQfgzjth1qyCY6ZMgZEj4emnYeVK6NIFBgyAQ4cKjilvLb6gUEpERES8xhszpaz2PW+FUmrfExERqbkuuwwGDoTWraFNG3j+ebMKafFiSE2F99+HcePMsKpbN/jwQzN8WrzYfP7s2bBhA3z2GXTtChdfDM8+a1Y9Wa+bJk2C5s1h7Fho3x6GD4drr4XXXitYx7hxcNddcPvtkJRkPiciAj74wHzcmbX4gkIpERER8RrrxVVwsPPPcXWmlFUppfY9ERER8ab8fPjySzh50mzjW7HCHF3Qr1/BMe3aQZMmsGiR+fGiRdCpEyQW2ptlwABISyuotlq0qOg5rGOsc+TkmNcqfExAgPmxdYwza/GFIN9dWkRERGoar7Tv/TNTqnGsKqVERETEPenp6aSlpTk+Dg0NJTQ0tMRj//rLDKGysswqqalTzWql1avN1zxxcUWPT0yEAwfM9w8cKBpIWY9bj5V1TFqa+Zrk+HEzECvpmE2bCs5R3lp8QZVSIiIi4jXVpX0vI6PgfVVKiVSugycOcujkofIPFBHxoKSkJGJjYx23MWPGlHps27ZmALVkCdx7LwwZYrbkSflUKSUiIiJe404o5Ur7Xk5+DgdOmH/u81allEIpkcqTmZtJ50mdMQyD9f9eT3xkvK+XJCI1xIYNG2jYsKHj49KqpMB8XdOqlfl+t26wbBmMHw+DBpmvfVJSilYoHTwI9eqZ79erV3yXPGtHvMLHnLpL3sGD5m5/4eEQGGjeSjqm8DnKW4svqFJKREREvCY313xbWe17+9L3YWAQEhhC3Yi6ri/QSWrfE/GOtQfXcujkIQ5nHOaZ357x9XJEpAaJjo4mJibGcSsrlDqV3W6+PujWzZyj+csvBY9t3gzJyWa7H5hv//qr6C55c+aYgVNSUsExhc9hHWOdIyTEvFbhY+x282PrGGfW4guqlBIRERGvqez2PWueVKOYRgTYKu9vb6qUEvGO1QdWO96ftHwSw3sOp13ddr5bkIjIKUaPNnfMa9IE0tNh8mSYPx9mzYLYWBg6FEaOhNq1zaDpvvvMEOjMM83n9+9vhk+33AIvv2zOd3rySRg2DKwc7J
57YMIEeOQRuOMO+PVX+Oor+OmngnWMHGm2DXbvDj17wuuvmwPXb7/dfNyZtfiCQikRERHxmspu3/PGPKlT1+LrSqns/Gyy8rIICwrzzUJEKpEVSgXYAsg38hk1ZxTTbpjm20WJiBRy6BDceivs328GP507m4HUhReaj7/2mrkT3jXXmK8ZBgyAt94qeH5gIEyfbs6iOussiIw0w6X//rfgmObNzQDqwQfNtsBGjeC998xzWQYNgsOH4amnzGCra1eYObPo8PPy1uILCqVERETEayp7973daQWVUpXJHyqlokKiCLAFYDfspGSlUC/KhwMhRCrJmoNrAPi/Pv/H8wueZ/rf0/ll+y9c0OICH69MRMT0/vtlPx4WBm++ad5K07QpzJhR9nnOOw9WrSr7mOHDzVtF1uJtmiklIiIiXmOFUsHBzj/HnfY9b1ZK+SqUCrAFEBMaA6iFT6qnfHs+aw+uBWBQh0Hc2/1eAB6a/RD59nxfLk1ERDxEoZSIiIh4TWVXSu1J/6d9rxJ33jt1Lb5q3wPNlZLqbdvxbZzMPUl4UDht6rTh6XOfJi4sjjUH1/DJmk98vTwREfEAhVIiIiLiFfn55k4wUHkzpbxVKZWRUfC+ryqlQKGUVG/WPKmOCR0JDAikTkQdnuz9JABP/PoEJ3JO+HB1IiLiCQqlRERExCusKinQTClPUSgl1ZkVSnWt19Vx3/Cew2lRqwX7T+zn1YWv+mZhIiLiMQqlRERExCtycwvedyeUKi/8yc7L5tDJQ0Dltu/l5Zk3x3XVvidSKUoKpUKDQnmp30sAvPzny+xN2+uDlYmIiKcolBIRERGvKFwp5cqg88Lte4ZR+nF7081fTsOCwqgTXseNFTrn1IotVUqJVA5r573CoRTANe2voVfjXmTmZfLkvCd9sDIREfEUhVIiIiLiFVYoFRQEAS68ArEqpQyjaLB1KmueVKOYRthsNjdXWb5TQymfVkqFxgGQmpXqu0WIVIJDJw+xL30fNmx0SuhU5DGbzcbY/mMB+Hj1x6zaX84e6SIi4rcUSomIiIhXWIGSK1VSUBBKQdlVSdY8qcoecl5SpVRZFVyVSZVSUl2tOWBWSbWq3Yro0Ohij5/R6Axu6HgDBgYPzX4Iw1ffhCIiUiEKpURERMQrrFDKlXlS1vFW4VNZw873pO0BKneeVOE1WNVehlF0XpY3OUKp7BTfLECkkljzpLrU61LqMWMuGENoYCjzds5j2t/TvLQyERHxJIVSIiIi4hXuhlI2W9G5UqWx2ve8VSkVG1twn69a+FQpJdXV6oOrAeia2LXUY5rGNeXBMx8EYNScUeTm+ygdFhERtymUEhEREa9wN5QC53bgs9r3GsU0cv0CLsjIMN/GxRXc56th5wqlpLqy2vdOHXJ+qtG9RxMfEc/fR/9m0vJJXliZiIh4kkIpERER8Qqrxc2dUMqZSilH+56XKqWiogrmY6lSSsRzMnMz2XRkE1B+KBUTGsN/z/8vAM/89gzHM49X9vJERMSDFEqJiIiIV3iiUqrM9j1r0LmXZkqFhxeEZaqUEvGc9YfXk2/kUzeiLg2iG5R7/J2n30lSfBJHM48y/OfhGnouIlKFKJQSERERr6jMUCozN5MjGUeAym/fKxxKhYaa7yuUEvEca8h513pdsVm7HJQhKCCItwa+RaAtkMl/TeaFBS9U8gpFRMRTFEqJiIiIV1TmTKm96XsBiAiOoFZYLTdW57ySKqV83b6XlZdFVp6PkjERD3PsvJdY+s57pzq32blMGDgBgCfnPck3G76pjKWJiIiHKZQSERERr7BCKWsOkyvKmylVeOc9ZyorKsKf2veiQ6OxYX6+qVmpvlmEiIcVrpRyxT3d72FEzxEA3Dr1VpbvW+7hlYmIiKcplBIRERGvqMz2PW/tvFd4DYXb93xVKRVgCyA2LBZQC59UD3bDztqDawHXQymAsQPGcnGri8nMy+TyLy53bIAgIiL+SaGUiIiIeEVltu85Kq
Uqecg5+FelFGiulFQvO47vID0nndDAUNrWaevy84MCgvjy2i/pEN+B/Sf2c/kXl3My52QlrFRERDxBoZSIiIh4RUVCqbLa9wzDYMbWGQC0iGvh5uqc50+DzkGhlFQvVutex4SOBAe60esLxITGMO2GadSNqMuqA6u49ftbsRt2D65SREQ8RaGUiIiIeEVurvnW0+173278loW7FxIeFM4dp93h/gKdlJFhvo2I8P2gc4DYULXvSfXh7jypUzWv1ZzvB31PSGAI3238jid/fbLiixMREY9TKCUiIiJeURnte9l52Twy5xEAHun1CA1jGlZghc5R+55I5Vl9cDXg2s57penVpBfvXfYeAGP+GMPHqz+u8DlFRMSzFEqJiIiIV1RG+96EpRPYkbKD+lH1GXX2qIot0Elq3xOpPJ6qlLLc0uUWHj/ncQDumnYXfyT/4ZHzioiIZyiUEhEREa+wQqlgN8bElNS+dyTjCM/+/iwAz/d9nsiQyAqu0DklVUr5sn1PoZRUF0czjjp2y+uc2Nlj532277Nc0/4acu25XDXlKrYf3+6xc4uISMUolBIRERGv8HT73n9/+y+p2al0SezCrV1urfgCnaRKKZHKsebgGgBa1GpBbFisx84bYAvg4ys/plv9bhzJOMKVX15Jvj3fY+cXERH3KZQSERERr/BEKGUFQpuPbGbi8okAjO0/lsCAQA+s0Dl+WymVneK7RYh4gKdb9wqLDInkxxt+JCokir8O/cXag2s9fg0REXGdT0OpMQvG0OPdHkSPiSbhlQSu/PJKNh/ZXO7zvl7/Ne0mtCPsuTA6TezEjC0zvLBaERERqQhPzpR6ZO4j5NnzuLTNpVzQ4gLPLNBJGnQuUjkcoVRi10o5f4PoBpzT5BwAft/1e6VcQ0REXOPTUOq3Xb8xrMcwFg9dzJxb5pBrz6X/Z/05mXOy1Ocs3L2QG769gaGnDWXV3au4su2VXPnllaw7tM6LKxcRERFXeapSat6Oefy4+UcCbYG8cuErnlugk/y1fS81K9V3ixDxACuU6lKv4jvvlaZPkz4ALEheUGnXEBER5/k0lJp580xu63obHRI60KVeFz664iOSU5NZsX9Fqc8Zv2Q8F7W6iFG9RtE+vj3P9n2W0+ufzoSlE7y4chEREXFVbq75tmIzpQwemv0QAPd0v4d2ddt5aHXOy8gw30ZE+Fn7niqlpArLzstm45GNQOW071l6N+0NmJVShmFU2nVERMQ5fjVTKjXb/Atf7fDapR6zaPci+rXoV+S+AS0HsGjPokpdm4iIiFSMJ9r3ko8cZtWBVcSGxvL0uU97bnEuUPueiOdtOLyBPHsetcJq0TimcaVdp0eDHoQGhnI44zB/H/270q5TGQzDYMW+FWTl+fAHjoiIh/lNKGU37Dww8wF6Ne5Fx4SOpR534MQBEiMTi9yXGJXIgRMHSjw+OzubtLQ0xy09Pd2j6xYRERHneKJ9b+eRQwA80fsJ4iPjPbQy15TUvqdKKZGKKTzk3GazVdp1QoNCOaPRGUDVmyv17cZv6f5udx6Z84ivlyIi4jF+E0oN+2kY6w6t48trv/ToeceMGUNsbKzjlpSU5NHzi4iIiHOsUCo42PXnWqFUXk4gzeOac98Z93luYS7y10qpzLxMsvN8mI6JVEBl7rx3qqo6V2r5vuUAzN8537cLERHxIL8IpYbPGM70LdOZN2QejWIalXlsvah6HDx5sMh9B08cpF5UvRKPHz16NKmpqY7bhg0bPLZuERERcV5FKqXS8w+b7+SG82K/FwkLCvPcwlzkb4POY0JjsGFWllijEESqmtUHVwPeCaUKz5WqSnak7ABg45GNCqBFpNrwaShlGAbDZwxn6qap/HrrrzSv1bzc55zV+Cx+2fFLkfvmbJ/DWY3OKvH40NBQYmJiHLfo6GiPrF1ERERcU5FQ6v2/zA1Ngu0xXJd0nQdX5Zr8/IKB7YUrpXzZvhdgCyAmNAZQC59UTYZhsObAGs
A7odRZjc4i0BbIrtRdJKcmV/r1PGXHcTOUyrPnseGw/tAuItWDT0OpYTOG8dnaz5h89WSiQ6M5cOIAB04cIDM303HMrVNvZfTc0Y6P7z/jfmZuncnYhWPZdGQT/5n/H5bvW87wnsN98SmIiIiIk9wNpVYfWM33274wn0tMpc6bKU9mwUsUv2nfA82VkqptV+ouUrNTCQ4I9sqOmtGh0ZxW/zQAFuyqOi18249vd7xvtTuKiFR1Qb68+MTlEwE47+Pzitz/4RUfclvX2wBITk0mwFaQnZ3d+GwmXz2ZJ+c9yeO/Pk7r2q35fvD3ZQ5Hl8plGLBlC7RoAUE+/YoSERF/ZlUYuRpKPTz7YQgy06CcLN/+j6ZwKBUW5h+DzsEMpXal7nI6lDqeeZwXFrzABS0u4KJWF1Xo2luPbeXt5W9zbdK1jgHSUrMZhsEbS98gKiSKO067o9zjrYClQ0IHQgLdKKV0Q58mfVi+bzkLkhdwU+ebvHLNikjPTudo5lHHxxUNpbLzshnzxxiuS7qODgkdKrg6EanOrr7a+WO/+8718/v0lZ3xtFHuMfNvm1/svus6XMd1HXxXui9F/fADXHUVPPYYjBnj69WIiIi/cqdSal/6Pn7Z8QsBwQnYMYOt/HwIDKyUJZYrI8N8GxoKAQFVt1Lqg1Uf8OqiV3l10atc0fYKXhvwmlNjFAo7mXOSMX+M4ZWFr5CTn8M7K99h0dBFJMVrU5mabtrf07h/5v2AGVANPX1omcd7c8i5pXfT3oxbPK7KzJWy5klZrBlc7np/1fs889szLNm7hJ9v+rlC5xKR6i02tuB9w4CpU837unc371uxAlJSXAuvCvOLQedStW3caL5dtsy36xAREf/mTii1K2UXAA1r13bc58sAyKqUiogw3/rDoHNwPZRavHex4/0fNv9A+zfb85/5/ykyQqE0hmHw7YZvaf9me55f8Dw5+TnUDq9NWnYal06+lMMnD7vzKUg1kZufy6g5oxwf3/PTPfy287cyn+MIpRK7VuLKijqnyTmAOTS8KnzNWvOkokKiAFhzYA2GUf4f+EuzaM8ix3lERMry4YcFt8REuP562LHDrIr67jvYvh0GD4a6dd07v0IpqbCTJ823u3f7dh0iIuLfrFAqONj55+xOM//n0qRuguO+zPJzk0pTeOc98I9B5+B6KLV071IA3hz4Jn2b9yU7P5tnfnuGpLeS+H7T96X+srvx8Eb6f9afa7++lt1pu2ka25Spg6ayefhmWtRqwY6UHVz91dXaGawGm7R8En8f/Zv4iHiuancVefY8rv7qarYe21rqc3xRKVU3oq6jqu+P5D+8dl13WZVSFzS/gJDAEFKzU9mVusvt81k/A/af2M/xzOMeWaOIVH8ffAAPP1y0Yj0wEEaONB9zh0IpqbDCoVQF/mAjIiLVnDuVUrtT/wml4ho6wix/DKV8XSkVG2rW1jsTSh08cZDk1GRs2Lil8y3MvWUuX1/3NY1jGrMzZSdXTbmKgZMH8vfRvx3PSc9OZ9TsUXSe1Jm52+cSGhjK0+c+zYZhG7iy3ZXUjajLtBumERMawx/Jf/Cv6f+qUBWHVE3HM4/zzG/PAPDs+c/y2dWf0aNBD45lHuPSyZeWGH4czzzuCFe61Ovi1fX2adIHgAXJ/j/s3KqUalOnDR3izRlQ7s6VSslKKfL9vf7w+gqvT0Rqhrw82LSp+P2bNoHd7t45FUpJhVmhVGYmHDvm27WIiIj/ciuU+qdSqlFMI0cQ5E+hVFVs31u2z+y3bx/fnujQaGw2G9cmXcvGYRt5/JzHCQkMYebWmXR8qyOj547mkzWf0HZCW15d9Cp59jwub3s5G4Zt4D/n/YeI4AjHeZPik/jq2q8ItAXyyZpPeOnPlyrjUxU/9vyC5zmaeZSk+CSGnj6UiOAIfhj8A41iGrH56Gau/+Z6cvNzizxn7cG1ADSNber4OvaW3k17A1SJuVJWpVTzuOaOijJ3Q6nl+5YX+Xj9IY
VSIuKc22+HoUNh3Dj44w/zNnYs3Hmn+Zg7FEpJhVlDX0EtfCIiUjp3Qqk9aXsAaBzT2BEE+cNMqarcvme17fRs2LPI/ZEhkTx/wfOsu3cdF7e6mFx7Li/++SJDvh/C/hP7aVW7FT/d+BM/DP6BFrValHjuAa0GMP6i8QCM/mU0UzdOdfpzyLPn8caSN2j6elNe+fMVp58n/mHbsW28sfQNAMb2H0tQgLmfUv3o+vw4+EcigiOYu30u98+8v0gVnS9a9yy9m5ih1KoDq0jPTvf69V3hCKVqNadLollR5m4oZf0MsKhSSkSc9eqr8MgjZhDVp495GzcORo2CV9z8X7dCKakwq1IKFEqJiEjpKlIp1Ti2sSMA8qdKKWtNOTnul617gluhVIOeJT7euk5rfrrxJ34c/CMtarUgIjiC5/uaYdXA1gPLPf+wnsMY1mMYADdPvZlV+1eV+5zfd/3O6W+fzoiZI0hOTebD1R+W+xzxL4/98hg5+Tn0b9mfi1pdVOSx0+qfxudXf44NGxOXT2TC0gmOx6xd5HwRSjWObUyzuGbYDbtj8Lc/MgzD0b7niUop62dAp4ROAKw7tK7CaxSRmiEgwAyl9u41d9xLSTHff+QR93dGViglFaZQSkREnJH7T9eOOzOl/L19DwpCN19wNpQyDMPRvndqpVRhNpuNy9pext/D/+bwqMM83vtxQoNCSz3+VK9f9Dr9W/YnIzeDy764jH3p+0o8bl/6Pm7+7mbO/ehc/jr0FzGhMQBsObaFnHwf/oOKS/5M/pNvNnxDgC2AVy98tcRjrmx3JS/2exGAB2Y9wMytMwHfVkoB9GlqzpXy5xa+wxmHOZlrvuBuGtfUMXtrV+oul4eUG4bBkr1LALi9q9lro0opEXFFXh7MnQtffAE2m3nfvn1w4oR751MoJRWmUEpERJzhaqVUbn4uB04cAPynfc9qWT+1Ugp8uy5nQ6ntx7dzLPMYIYEhdErsVO55AwMCi8yNclZQQBBfXfsV7eu2Z2/6Xi7/4nIycgv6/XPyc3h14au0ndCWz/8yK2ju7nY320ZsIyY0hjx7XpFBzOK/7IadkbNHAjD0tKFlfl2NOnsUt3e9Hbth5/qvr2f1gdWOeUa+CqWsFj5/HnZuVUk1jG5IWFAYcWFxNItrBhTM5HLW3vS9HDhxgEBbIDd1vgmAQycPcSTjiEfXLCLV065d0KkTXHEFDBsGhw+b97/0krkrnzsUSkmFKZQSERFnuBpK7Uvfh4FBSGAI8ZHxftW+F/FPTmPtCAhVI5Sy2nZOq3caIYEulKy5ITYslmk3TKNOeB1W7F/BkO+HYDfszNk2hy6TujBqzihO5JzgjIZnsOyuZUy6dBJ1I+o6dhbT8OWqYcq6KSzdu5SokCj+e/5/yzzWZrMx6dJJ9G7Sm/ScdPp+3Jdcey6xobE0jW3qpRUXZVVKLdmzhOw8Hw+HK0XheVIWd1v4lu01KyU7JnQkITLBEW7p+01EnHH//dC9Oxw/XvAHOoCrroJffnHvnAqlpMIUSomISHkMoyCUKhzklMWaJ9UwuiEBtgC/bN+z2fxj2LmroVRZrXue1LJ2S6YOmkpwQDDfbPiGrpO60v+z/mw6son4iHg+uPwDFg5dSLcG3RzPcYRSainye5m5mTz2y2MAPNbrMepF1Sv3OSGBIXw36Dta1GrB8Syz9axLvS7YrB4QL2tduzUJkQlk52c7Wlv9TeF5UpauiV2Bgplczjr1Z4C+30TEFQsWwJNPFv8DY7Nm5mwpdyiUkgorvPtecrLv1iEiIv4rL6/gfWcrpRw778U2BvCL9r1TQykoCKX8oVIqMy+zzFlMS/d5N5QC6N20N+9e9i4Afx36iwBbACN6juDv+/7m9tNuJ8BW9OVohwT9klxVjF8ynuTUZBrFNOLBsx50+nl1I+oy/YbpxIbGAgUBiy/YbDa/nyvlqJSKq3il1Kk/A1SZKCKusNshP7/4/Xv2QHS0e+d0O5Taemwrs7
bOIjPXfHVWeGtXqVkKV0rt3evb3YdERMQ/FR4C7mwoZQ05bxxTNJTyp0opKBh27stKKWtAOEBqVmqJx+Tm57Jy/0rAu6EUwJCuQ3jj4jcY3HEwq+5exfiLxzuCtFPpl+Sq4dDJQ7yw4AUAxlwwxuXZY+3j2zP9xulc1e4qhvUcVhlLdJq/z5UqqX3PGna+/tB6pzcFsBt2R/tejwY9AIXAIuKa/v3h9dcLPrbZzAHnTz8NA8vfoLdEQa4+4WjGUQZ9M4hfd/yKzWZjy31baFGrBUN/HEqtsFqMHTDWvZVIlVU4lMrNhYMHoX59361HRET8j1uhVFrBznuAX82U8rdKqcCAQGJCY0jLTiMlK4X4yPhix6w/vJ6svCxiQ2NpVbuV19c4vOdwhvccXu5x1i/JW49tJTsv26Vd/8R7np73NOk56XRv0J0bO93o1jnOaXIO5zQ5x8Mrc51VKfVn8p/k2/MJDHBzX/NKUlL7XtPYpsSGxpKancrGwxsdIVVZNh/ZTHpOOuFB4Y7vM7XviYgrxo6FAQMgKcl83XPjjbBlC9Sta+7G5w6XK6UenPUgQQFBJD+YXOQvIoM6DGLmtpnurUKqrJycgpaMyEjzreZKiYjIqXJzC94PcvJPYlYoVVUqpXwZSkH5c6WsWTI9GvYo1jLnT+pH1ScuLI58I5/NRzf7ejlSgvWH1vPOyncAGNt/rF9/PTmjU0InYkJjSM9JZ83BNb5eThH59nySU835GIUrpWw2m6OFz9k1WzOzujXoRlCA+YO4fXx7bNg4knGEQycPeXDlIlIdNWoEa9bA44/Dgw/CaafBiy/CqlWQkODeOV3+P8jsbbN5qd9Ljr9aWlrXac2ulF3urUKqrMJVUu3amW8VSomIyKkK77zn7DzjqjZTypfte+B8KNWzgXdb91xls9nUwufnRs0Zhd2wc1W7qxxVRlVZYECgo2LL3+ZK7U3fS649l+CAYBpGNyzymKtzpUr6GRARHOEIu/T9JiLOCAqCm2+Gl1+Gt96CO+8s+rrI5fO5+oSTuSdL7Bk/lnlM5dU1kBVKBQVBixawYoVCKRERKa5wKOUsa6aUP7XvWZt7+Fv7HrgQSnl5npQ7OsR34M/df6qlyA/N3jabn7f+THBAMC/1e8nXy/GY3k16M2PLDBYkL+CBMx/w9XIcrNa9JrFNirUVuh1KnfIzoEN8B7Yf3876w+s5v/n5FVuwiFQ7P/7o/LGXX+76+V0OpXo36c0naz7h2b7PAmDDht2w8/KfL3N+M/0Qq2msUCoyEhqbf8hWKCUiIsW4Gkpl52Vz8ORBwD/b9yIK/X3OHwadQ9mh1Mmck46Ap0qEUhq+7Lc+XvMxAHd3u5vWdVr7eDWeY1V8Ldi1AMMwsDlb0lnJth/fDkCLWi2KPVY4lCpvzdl52Y7wqkfDHkUe6xDfgWl/T1OllIiU6Mori35ss8Gp+9xZP35K2pmvPC6377184cu8s/IdLv78YnLyc3hk7iN0fKsjv+/6vVr9tUScY/3FWKGUiIiUxQqlgoOdO35f+j4AwoLCqBtRF/D/9j1/rpRauX8ldsNOo5hG1I/2/91I1L7nvzYd2QRAvxb9fLwSz+reoDthQWEczjjsV7PMHDvvFRpybmlftz1BAUEczzrumMFXmjUH15Brz6VOeJ1i51IILCJlsdsLbrNnQ9eu8PPPkJJi3n7+GU4/HWa6OWLc5VCqY0JH/h7+N+c0Pocr2l7ByZyTXN3+albdvYqWtVu6twqpsqxKqYgIhVIiIlI6VyulCu+8Z/313x/a9/x60HloHFByKOUYct6gR7HH/JH1S/K249vIyvPxP6w4GIbB5iNmYNO2blsfr8azQgJDOLPRmYB/zZVyhFK1iodSoUGhJMUnAbDmQNnDzgu37p1aUdUxoSNghlLGqeUPIiKFPPAAjB9v7sAXE2PeBgyAceNgxAj3zunWVhmxYbE80ecJvrruK2bcNIPn+j5XJf7qJp6n9j0REXGGy6
HUKfOkwL/a96raoPOl+6rOPCmAxMhEaofXxm7YHZU54nt70/dyMvckgbbAEtvJqrreTXoDsCB5gY9XUsCaKVVSpRQ4P1fK2nmvpJ8B7eq2I8AWwLHMY462aRGRkmzbBnFxxe+PjYWdO907p8uh1MytM/kj+Q/Hx28ufZOuk7py47c3cjzzuHurkCqrpFBq/37Iy/PdmkRExP+4Gko5dt77Z54UqH2vPI5QKjul2GNVacg5aAc+f2VVSbWo1YKQQBd2LagirLlSVaVSCqBrYlcAVh9cXeZ5yvoZEBYURstaZseLvt9EpCw9esDIkXCwUH598CCMGgU93XyJ4XIoNWrOKNKy0wD46+BfjJw9koGtB7IjZQcjZ490bxVSZRUOpRITzVkhdjvs2+fbdYmIiH/JzTXfutq+VziUUvte2WLDYoHilVKHTh5iZ8pObNjoVr+bD1bmHiuUWndonY9XIhZr1lJ1a92znNnoTAJtgSSnJrMrZZevl0NWXpZjvl5FKqVSs1IdFYeltfBqrpSIOOODD8wilCZNoFUr89akCezdC++/7945Xd59b8fxHY7e5W83fstlbS7jhQteYOX+lQz8fKB7q5Aqq3AoFRAADRuaZXu7d5tfnCIiIlCxmVIWte+VrbT2vWV7zbaddnXbOYKrqkC/JPsfq1KqXZ12Pl5J5YgKiaJbg24s3buUBckLaBrX1KfrsYKxyOBIx4YPp+pSrwtg7tKXmpVa4vf48n3LAWgW14z4yPgSz9MhvgPfb/pelVIiUqZWrWDtWpgzBzb9013fvj3061ewA5+rXK6UCgkMISPX3HJt7va59G/ZH4Da4bUdFVRScxTefQ80V0pERErm7kypxrFq33NWqaHUP7NkTt0G3t852vcUSvmN6l4pBYXmSu3y/Vypwq17pw4nt9QOr+2oKF17cG2JxzjTvqvvNxFxls0G/fubg81HjIALL3Q/kAI3KqXOaXIOI2eNpFfjXizdu5Qp104B4O+jfxf5a6bUDIUrpUChlIiIlMwKpYKDnTu+rJlSvqqUys8v+DwiIgrut9r3/LVSyvELaYOqMU/KYlVK7Ti+g4zcDCKCI8p5hlQ2RyhVp/qGUn2a9mHsorH8nuz7uVLWkPPyhsp3rdeV3Wm7WXNwDb2b9i72uGPIeRk/AwpXJhqGUWoIJiJy8iT89hskJxe8LrK4swOfy6HUhIET+PdP/+abjd8w8ZKJNIxpCMDPW37molYXub4CqdKsUMp6ca5QSkRESuJKpVRWXhaHMw4DRdv3fD1TqvB1q0qllGEYVW7IuSUhMoG6EXU5knGEjYc30q1B1ZmHVR1l5mY62smqc6VUr8a9ANh0ZBOHTh4iITLBZ2vZfnw7UPo8KUvXel2Z9ve0UudKOfMzoG2dtgTaAknJSmFf+j7H73giIoWtWgUDB5odUydPQu3acOSImQckJLgXSrncvtcktgnTb5zOmnvWMPT0oY77X7voNf538f9cX4FUaadWSllzpBRKiYhIYa6EUlaVVHhQOLXDazvu93WlVOHrWkEU+M+gcyuUysjNICff/AffkbKDo5lHCQkMoXNiZx+uzj1qKfIfW45twcAgLiyO+IiS5xJVB3Ui6tAxoSNAkR3H3WU37PyZ/CeZua7/4HK07zkRSkHJw873pu1lb/peAmwBnF7/9FLPERoUSqvarQB9v4lI6R58EC67DI4fN1+XLV4Mu3ZBt27w6qvundPlUAog357Ptxu+5bnfn+O5359j6sap5Nvz3VuBVGmlte8lJ/tmPSIi4p/cCaUaxzYu0kLi65lSVigVGmpu7mHxl0HnMaExjvdTs1KBggqJrvW6EhoU6pN1VYQjlNLwZZ+zhpy3rdO22rd2WXOlJi2fRJ49z+3zGIbBPdPv4ZwPz+HJX590+fmFZ0qVxQql1h1aR25+bpHHrNa9DvEdiAyJLPM8jhY+fb+JSClWr4aHHjJfBwUGmq99GjeGl1+Gxx9375wuh1Jbj22l/Z
vtufX7W/lu43d8t/E7bp56Mx3e6sC2Y9vcW4VUWZopJSIiznAllLKGnJ86q7Jw+55heHJ1zilpyDn4T/teUEAQ0SHRQEELn7XzXmnbwPs7q2JFlRu+VxOGnFvu7X4v4UHhzNk+h4dnP+z2eV5f/DrvrnwXgBlbZ7j8fGumVHmVUs3imhETGkN2frbjv5PFlfZdVSaKSHmCgwv+MJeQUFCMEhvrfgbgcig14ucRtKzdkt0P7mbl3StZefdKkh9Ipnmt5oyY6UYDoVRppe2+d/iw71+ci4iI/8j954/3ToVSaf/svFdoyDkUDYN8UZVUWijlL4POofhcqaX7quY8KUvh4cviW1bY0a5OOx+vpPJ1SuzEJ1d9AsD4JeOZtHySy+eY/vd0Hpr9kONja0aVs1KzUjmedRwov1IqwBbgaM89tYVPoZSIeNJpp8Ey8+9dnHsuPPUUfP45PPAAdOzo3jldDqV+2/UbL/d7uciMhzoRdXjxghf5bedv7q1CqqxTK6Vq1y54sb5nj2/WJCIi/set9r0yQilf/OHD3yuloCCUSs1OJc+ex4p9K4AqHEr980vyzpSdnMg54ePV1GyO9r0aUCkFcG3StTx3/nMADJ8xnLnb5zr93LUH13LDtzdgYHDX6Xe5NaPKat2rG1GXqJCoco/vmtgVgDUH1jjusxt2lu9bDjgZSv0TAm84vAHDF+WoIuL3XngB6tc333/+eahVC+691yxKeecd987pcigVGhhKek56sftP5JwgJNCJV5pSrZy6+57NphY+EREpzqX2PatSKrZoKBUcbP5/Bnwz7Ly8Sil/CqVSslJYf2g9mXmZxITG0KZOG98uzE11IuqQGJkIwMbDG328mprLMIyC9r06NSOUAni89+Pc3Plm8o18rvv6OkcwV5aDJw5y2ReXcSLnBH2b9+XNgW/Sp0kfABbsWuD0tZ1t3bM4hp0fXO24b8vRLaRmpxIeFO4IeMvSpk4bggKCSMtOc/xxQETEYhhmy95ZZ5kfJyTAzJmQlgYrVkCXLu6d1+VQ6tI2l/Kvaf9iyZ4lGIaBYRgs3rOYe366h8vbXu7eKqTKOrVSChRKiYhIcVYoFRxc/rGlzZSy2Xy7A5/Vsl5apZS/te9ZbTs9GvQgwObW3jZ+QS18vnfw5EHSstMIsAU4dmirCWw2G+9e9i5nNz6blKwULv3iUo5mHC31+Ky8LK6cciXJqcm0rt2ar6/7muDAYHo3NQen/578u9PXtiqlWtRq4dTxhXfgs6qcrJ8Bp9c/neDA8n/4hgSG0Lp2a0DfbyJSnGFAq1ae/z3f5Vco/7v4f7Ss3ZKz3j+LsOfDCHs+jF4f9KJV7VaMv2i8Z1cnfk+hlIiIOMMT7Xvg2x34rCDMqg62+GP7XkpWimPXrao65NyiHfh8b9ORTYA5ULsq7uJYEWFBYUwdNJVmcc3Yemwr1359LTn5OcWOMwyDO364g8V7FlMrrBbTb5zuGHdi7ea3+sBq0rLTnLquq5VSHRI6EGgL5EjGEfal7wMoEkw7SzvwiUhpAgKgdWs4Wno27955XX1CXFgcPwz+gc3DN/PNdd/wzXXfsHn4ZqYOmkpsWKxnVyd+T6GUiIg4w9lQKiM3g6OZ5qudU9v3wLeVUlVt0LkrA479mYYv+55jnlQNat0rLCEygWk3TCM6JJr5O+fz75/+XWzm0nO/P8cX674gKCCIb67/pkjLbMOYhrSo1QK7YWfh7oVOXXN7ynag/CHnlrCgMNrHtwcKhp27s9GBvt9EpCwvvgijRsG6dZ47p9u13K3rtOaytpdxWdvLalQZrxSlUEpERJzhbChlVUlFBkcSG1r8j11WVZI/hVL+WCm1N30v6w6ZrxirfCil9j2fq4nzpE7VMaEjX177JQG2AN5f9T7jFo1zPPbV+q94av5TALw18C36Nu9b7Pl9mro2V8rVSimALonmQJfVB1aTk5/jCKcUSolIRX3yifk659ZbYelSc35UeLi50Vnhmz
uCnDlo5KyRTp9w3IBx5R8k1YJhFMzXUCglIiJlyc0135YXSlnzpBrHNsZmTTUvxB/a96pCKDVvxzzyjXwaRDegYUxD3y6qgqxfkpNTk0nPTic6NNrHK6p5rFCqXd12Pl6Jbw1sPZBx/cfxwKwHGDVnFG3qtKFeVD2GfD8EgAfPfJC7ut1V4nN7N+nNR6s/cmqulGEY7EzZCThfKQXmXKnP//qcNQfXsPbgWnLyc6gdXtvpuVRQfAe+kn4Oi0jNc/vtcNFF8Prrnj+3U6HUqgOrnDqZDf3QqkkK/5W68HwNhVIiInIqVyulSponBWrfK48VSu1K3QVU/SopgFrhtagfVZ/9J/az4fAGzmh0hq+XVOM42vfq1txKKcuIM0aw8chG3l7xNjd8ewNRIVFk5WVxSetLeOXCV0p9nlUptXTvUrLysggLCiv12IMnD5KZl4kNG01imzi9tsLDzgu377oSLLWu3ZrggGBO5JwgOTWZpnFNnX6uiFRfVsfykCGeP7dTodS8IfM8f2Wp8qzWPSg5lEpJgRMnICrKq8sSERE/5GwotTut5J33LGrfK5sVSlmq+pBzS4eEDuw/sZ/1h9c7HUptP76dfp/048ZON/Jc3+cqeYXVV3ZetmMnuJrcvmex2Wy8cfEbbD22lV92/MLJ3JN0TOjI5GsmExgQWOrzWtZqSb2oehw4cYCle5c6QqqSWK17jWIaERLoxO4Q/7Da96y1ges/A4IDg2lbty3rDq1j/eH1CqVExKGyCiednimVb89n7cG1ZOYWfxWYmZvJ2oNrsRt2jy5O/JsVSoWFQWCh/wfHxEDsP2NAVC0lIiJQEEoFl7MruaN9rwpWSuXnQ16ed9d0qlNDqepQKQXu7cA3duFYdqTsYNyicZzMOVn+E6RE245vw27YiQ6Jpl5UPV8vxy8EBwbz9XVf06NBD1rVbsW0G6YRExpT5nNsNptjF77y5kpZIaArrXsA8ZHxNIxuiIHBtM3TAPd+BmjHSxEpyQUXwOmnl31zh1OVUgCfrv2UCUsnsOTOJcUeCw4M5o4f7uCBMx/g5s43u7cSqXJKGnJuadwYUlPNUKp9e++uS0RE/I/T7Xvp/7TvlbDzHvh2ppQ1R7G0SikwW/iCnH515XmnhlLdG3T3zUI8zNXhy2nZaXyy9hMAMvMy+WnLT1zf4fpKW191tunIJsBs3dN8oQK1wmux5M4l2A17mRVShfVp2oevN3zN78m/8wRPlHqcVSnlyiwoS9d6XdmbvpdcuznIz51qSQ07F5GSDBhQOV1QTr9sen/V+zx89sMl/tANCgjikV6PMGHpBIVSNUh5odS6daqUEhERk9Pte6n+375XuGUdCiqlwAzLSvr/orcU3rGwbZ22xUKqqsrVHfg+WfMJJ3JOOD7+av1XCqXc5Jgnpda9Ymw2G4E25wIpwFEptXD3QvLseQQFlPyrmKNSyoWd9yxdErvw05afAGga25TEqESXz6EdL0WkJKNGQUKC58/rdPve5iObObPRmaU+3qNBDzYe2eiRRUnVUNLOexYNOxcRkcJcnSlVldr3goIK2th9Pey8cAhVXVr3AJLikwBzEH5qVmqZxxqGwYSlEwAYetpQAH7a8lORkEqcZ+28p1Cq4jomdCQuLI4TOSdYfWB1qcdVJJSyhp2D+z8DrEqpDYc3aDyLiACVN08KXAilTuaeJC07rdTH03PSycjN8MiipGoor1IKFEqJiIjJmVDqRM4JUrJSAP9s3ystlAL/GXYeG1ZQKVWdQqm4sDgaRjcEzF+Uy/LLjl/YfHQz0SHRjBswjla1W5GVl8X0v6d7Y6nVjhVKtavbzscrqfoCAwLp1bgXUPZcqe3HtwOuz5QCz4RSLWu3JCQwhIzcDHal7HLrHCJSvVi771UGp0Op1rVbs3D3wlIf/yP5D1rXbu2RRUnVYIVSp7YxQEEolZzsvfWIiIj/yjXHm5QZSu1JM+dJRYdElzo02B/a90oKpawWPl+HUkEBQY
4Wvuqy857F2ZaiN5e9CcCtXW4lJjSG65PMtr0p66dU7gKrIcMwCtr36qpSyhOsXfd+T/69xMfz7HmONmZ3KqVa1m5JVIg59MXdUCooIMgRQqqFT0QAduyA+PjKObfTodSNnW7kyV+fZO3BtcUeW3NgDU/Ne4obO93o0cWJf1OllIiIOMuZSinHznulVEmBf7bvQUFY5uv2PYAX+73Ig2c+WK0qpcC5HcF2pezix80/AjCsxzAAxyypn7f8XGbVvxR3JOMIx7OOY8OmPz57SOEd+IwSSg92p+4m38gnNDCU+tH1XT5/gC2ASZdM4rFej3FOk3PcXqf1/bbu0Dq3zyEi1UfTppXXwuf0oPMHz3yQn7f+TLd3utGvRT/a1THT801HNzF3+1x6Ne7Fg2c+6NLFf9/1O68sfIUV+1aw/8R+pg6aypXtrizzOW8ufZMJyyawM2UnTWKb8ETvJ7i1y60uXVc8w9lQyjAqtwdVRET8n1OhVDnzpEDte864p/s9vl5CpXD8kny49F+SJy2fhN2wc0HzC2gfb27/2zmxM23rtGXz0c1M2zyNmzrf5JX1VgdW616T2CaEB5fwhS8u69agG+FB4RzNPMrGIxsd89Is1jyppnFNCbA5XT9QhCe+xrUDn4h4i9M/6YIDg5l982ye7/s8+9P3887Kd3h7xdvsT9/P832fZ/YtswkODHbp4idzTtIlsQtvDnzTqeMnLpvI6F9G859z/8P6f6/nmfOeYdiMYUzbPM2l64pnlBVKNfpn06SMDDh+3HtrEhER/2SFUsFlvFSw2vecCaX8rVLKat/zh0qp6srRvldKpVRWXhbvrXoPKKiSAnOHNKta6qsNX1XyKqsXte55XkhgiGPzqJLmSu047v6Qc08q7/tNRMRTnK6UAjOYeqTXIzzS6xGPXPzi1hdzceuLnT7+07Wfcne3uxnUcRAALWq1YNneZbz050tc1vYyj6xJnFfW7nvh4VC3Lhw5YlZL1a7t3bWJiIh/caV9r1FMo1KP8eVMKev/e/5eKVVdWRUl+0/s53jmcWqF1yry+Ffrv+JIxhEaxzQu9rrw+g7X8+zvzzJz60xSs1KLDISX0m06sgnQznue1qdpH+btnMfvyb9zd/e7izxmVUq1qNXCF0tzsCqlNh7ZSL49n8CAQJ+uR0SqL5dCKV/Lzs8mLCisyH3hweEs3buU3PzcEiu1srOzyS70Z8v09PRKX2dNUValFJgtfFYo1aWL99YlIiL+x6X2PT+fKVXSBh8KpSpfTGgMjWMaszttN+sPry82L8cacH5P93sICij6ErdDfAfa123PxiMb+XHzj9zS5RaXrp2Zm8moOaPYm7633GPrhtfl1f6vVovgy2rfUyjlWdZcqd93/Y5hGNgKzbmwQilfV0q1qNWCsKAwsvKy2JGyg1a1W/l0PSLiPxYsgLffhm3b4JtvoGFD+PRTaN4cznFjlF2VCqUGtBzAe6ve48p2V3J6/dNZsX8F7618j1x7LkcyjpQ4DHDMmDE888wzPlht9VfW7ntghlKrVmnYuYiIOBdKudK+528zpdS+5x0dEjqYodShoqHU0r1LWbp3KSGBIdx5+p3Fnme18D3z2zN8teErl0OpVxa+4gi9nNEophFPn/e0S9fwR1YoZe3EJp5xZqMzCQoIYk/aHnal7qJZXDPHY472vVq+DaUCAwJpV7cdqw+sZv2h9QqlRASAb7+FW26Bm24yf9e3XvekpsILL8CMGa6f073peT7yf33+j4tbXcyZ759J8LPBXPHlFQzpMgSg1EGAo0ePJjU11XHbsGGDN5dcrTlTKQUKpURExLVKKX9t36sqg86rs9KGL1uB0aAOg0iITCjxudclXQfArK2zSMlKcfqa+9L38dKfLwHwyNmP8Palb5d6e+CMBwB4e8Xb5ObnuvKp+Z3c/Fy2H98OaKaUp0WGRNKtfjeg+Fwpf6mUAg07F5HinnsOJk2Cd98tOie0Vy9YudK9c1apSqnw4H
A+uOID3r70bQ6ePEj9qPq8s+IdokOiiY+ML/E5oaGhhFp/vgTS0rQVsKcolBIREWfl/vP7eWmhVFp2GmnZ5v+j/bF9Lz+/IFhTpZTvdEzoCBT9JfnwycNMWTcFKDrg/FQdEjrQIb4D6w+v54dNPzCk6xCnrvl/v/4fGbkZnNXoLF7s92KRVqtT5eTn8MW6L8xdpTdNdQxYr4q2H99Onj2PyOBIGkY39PVyqp0+TfuwZO8Sft/1u6NyLyM3gwMnDgC+r5QChVIiUtzmzdCnT/H7Y2MhJcW9c7pdKZWTn8PmI5vJs+e5ewq3BQcG0yimEYEBgXy5/ksubXOp21umivvKC6WaNDHfKpQSEanZ7HbI++flQmmhlNW6FxcWR1RIVKnn8lX7XuHrqVLKdxy/JBfaEez9Ve+TnZ9N9wbd6dmwZ5nPd3UXvtUHVvPh6g8BGDdgXJmBFJg7q/2r278AXGr380dW616bOm3K/bzFddZcqQXJBZVSO1N2Aub8tFphtUp6mldpBz4ROVW9erB1a/H7//gDWri5P4PLSU5GbgZDfxhKxPMRdHirA8mpyQDcN+M+XvzjRZfOdSLnBKsPrGb1gdWA2UO9+sBqxzlHzx3NrVNvdRz/99G/+WztZ2w5uoWle5cy+JvBrDu0jhcueMHVT0M8QJVSIiLijNxCXUzBxfckAZzbeQ98175X+HoKpXynfXx7AA6ePMjRjKPk2/OZuHwiYFZJlReeWC18s7fN5njm8TKPNQyDh2Y/hIHBoA6DOLPRmU6t8e5udxNoC+T3Xb+z9uBap57jjzYf+WfIuVr3KsU5Tc7Bho3NRzdz8MRBoNA8qbjmfhEEWiHwpiObyLfn+3g1IuIP7roL7r8fliwBmw327YPPP4eHH4Z773XvnC6HUqPnjmbNwTXMv21+kZ3w+rXox5T1U1w61/J9yznt7dM47e3TABg5eySnvX0aT817CjC3/LUCKoB8ez5jF42ly6QuXPjphWTlZbHwjoVFhgOK91hbY5cXSu3ZY/6VXEREaiar7Q1Kr5Ry7LxXxpBz8F37nnW9kBAIKOHVk9r3vCMqJMrxum/94fVM/3s6yanJ1Amvw6AOg8p9fvv49nRK6ESePY/vN31f5rE/bfmJX3f8SmhgKC/2c/4Prw1jGnJ1+6sBeHNp1a2W2nRkE6Cd9ypLrfBajnbUP5L/AArNk/KD1j0w1xEeFE52fjbbjm/z9XJExA889hjceCNccAGcOGG28t15J9x9N9x3n3vndHmm1Pebv2fKtVM4s9GZRRL8Dgkd2HbMtR9W5zU7D+Npo9THP7ryoyIft49vz6q7V7l0Dak85e2+16CBmZ7m5MDhw5CY6L21iYiI/3AqlEp1LZTydkVSWUPOQZVS3tQhvgM7U3ay/tB6vtv0HQB3nn4n4cGl/Mc5xaAOg/jr0F98teErbj/t9hKPyc3PZdScUQDcf8b9Lv8BdFiPYXy94Ws+++szXrrwJeLC4lx6vj+w2vcUSlWePk378Nehv/h91+9ck3RNkUopfxBgC6BTYieW7l3Kyv0raVOnja+XJCI+ZrPBE0/AqFFmG9+JE5CUBFGlT14ol8uVUodPHi5xV5OTOSf9osxUvKe89r3gYKhf33xfLXwiIjWXFUoFBEBgYMnHWDOlymvf81WllFUdXNofYqxKKYVSlc9qKfp247fM3T4XGzbu6X6P08+/roPZwjd3+1yOZhwt8Zh3VrzDpiObqBtRl8d7P+7yGvs07UPHhI5k5Gbw0eqPXH6+P7BCqXZ12/l4JdXXqXOlrEqpFrXcHMxSCXo2MOe0Ld271McrERF/EhJihlE9e1YskAI3QqnuDbrz098/OT62YQZR7618j7ManVWx1UiVUl4oBZorJSIiBaFUaVVSUKh9r4yd96CgIikvr2B4ujc4Wyml9r3KZw1f/mXHLwBc1vYylyqZ2tRpQ9d6Xcmz5zF109Rij6dkpfCf3/4DwDPnPUNsWKzLa7TZbI6dAN
9a9hZ2o2rNMTiWeYwjGUcAVB1TiXo3NUOp1QdWk5qVWtC+5yeVUoBj8wCFUiIC5h/fXnkFBg6E7t3h9NOL3tzhcvveCxe8wMWfX8yGwxvIs+cxfsl4NhzewMLdC/nttt/cW4VUSc6GUkuWKJQSEanJrEHnToVSTrbvgfnCqKJ/nXOW2vf8h1UpZbHCH1dcn3Q9qw+s5qv1X3Hn6XcWeeyFBS9wJOMI7eq2c+yk546bO9/Mo3MfZcuxLczZNocBrQa4fS5vs4acN4ppRGRIGS/0pEIaRDegZa2WbDu+jYW7Fxa07/nJTCmAHg17ALBy/0ry7HkEBbj866OIVCNDh8Ls2XDttWaVlCea5VyulDqnyTmsvns1efY8OiV0Yva22SREJrBo6CK6NehW8RVJlZCXV/CXb1VKiYhIWcqrlDIMo2CmlJOVUuDdFr7yQikNOvee9vHtHZX6beq0oV+Lfi6fw2rh+3XHrxw+edhx/47jOxi/ZDwAr174aoV+AY8KieL2rubMqgnLJrh9Hl/QPCnv6dO0DwA/bP6B1OxUAL/axKlNnTbEhMaQmZfJ+kPrfb0cEb80Zgz06AHR0ZCQAFdeCZs3Fz0mKwuGDYM6dcw/qF1zDRw8WPSY5GS45BJzVEBCgjm36dSq8PnzzYqk0FBo1Qo++qj4et58E5o1M18znXEGLD2l0NGZtZRm+nT4/nuYOBH+8x94+umiN3e4HEoBtKzdkncvf5eldy1lw7ANfHb1Z3RK7OTeCqRKsmZrgHOhVHJy6ceIiEj1Vl4olZqdyslcs/y2vJlSAQEF5/GnUEqVUt4TERxBy9otAbNKKsDm+svZVrVbcXr908k38ou08I3+ZTQ5+Tlc0PwCBrYeWOG1/rvHvwH46e+fHFUwVYFVKaVQqvJZc6W+XPclAImRiUQElzK8zgcCbAH0aGBWS6mFT6Rkv/1mhjyLF8OcOWaFeP/+BZ1FAA8+CNOmwddfm8fv2wdXX13weH6+GUjl5MDChfDxx2bg9NRTBcfs2GEec/75sHo1PPCAufPdrFkFx0yZAiNHmgHRypXQpQsMGACHDjm/lrI0bGiGb57k8v/FZ2yZwayts4rdP2vrLH7e8rNHFiX+z/oGs9kK/jpcElVKiYiIFUoFB5f8uFUlVTu8tlO/jPli2LmzlVIKpbzjtQGvcf8Z93PX6Xe5fY7rk64H4Kv1XwGwaPcipqyfgg0bY/uP9cgGPm3qtKF/y/4YGExcPrHC5/OWTUc3AdC2rkKpymZVSllVUv7UumfRXCmRss2cCbfdBh06mCHQRx+ZRRkrVpiPp6bC++/DuHHQty906wYffmiGT4sXm8fMng0bNsBnn0HXrnDxxfDss2bVk/U6atIkaN4cxo6F9u1h+HCzje611wrWMm4c3HUX3H67OYh80iSz8uqDD5xfS1nGjoVHH4Vduzz0j4cbodRjcx8j38gvdr+BwWO/POaRRYn/KzxPqqzXbAqlRESkvEopa+e98uZJWaxgyJsBkAad+5dL21zK6xe9TnhwKf9BnGC18M3bOY+DJw4ycvZIAG7vejtd6nXxyDqhYObV+6veJzPXy9tGukmVUt7TolYL6kfVd3zsT0POLY5Qap9CKRFnpJoZM7Vrm29XrDCrp/oV6jZv1w6aNIFFi8yPFy2CTp0gMbHgmAEDIC0N1q8vOKbfKR3rAwYUnCMnx7xW4WMCAsyPrWOcWUtZunc3X3+1aGFWTNWuXfTmDpcb5bcc20JSfFKx+9vVbcfWY1vdW4VUOc4MOYeCUGrfPrMfNkizEUVEapzyQilryHl5rXsWKwDyp0opte9VPS1qtaB7g+4s37ecW6bewuI9i4kIjuDZvs969DqXtL6EprFN2ZW6iy/Xfcntp93u0fN7Wp49z/Gavl3ddj5eTfVns9no07QPU9ZPAfwzlLLa99YfWs/JnJMafi81Rnp6OmlpaY6PQ0NDCS2rTQiw2822ul69oGNH874DB8zXQHFxRY9NTD
Qfs44pHEhZj1uPlXVMWpr5OuX4cbMNsKRjNm1yfi1lueEG2LsXXnjBfI5PBp3Hhsay/fj2YvdvPbaVyGD9gKopnA2lEhPNIMpuh/37K39dIiLif8oNpVKd23nP4ov2PWuWogadVy9WC9+c7XMAeLTXozSIbuDRawQGBDpmS01YNgHDMDx6fk/bmbKTXHsu4UHh5W48IJ5hzZUCMyz1Nw1jGtIgugH5Rj6rDqzy9XJEvCYpKYnY2FjHbcyYMeU+Z9gwWLcOvvzSCwv0gYULzVlUjz5qtiwOGVL05g6XQ6kr2l7BAzMfYNuxbY77th7bykOzH+Lytpe7twqpcpwNpQIDodE/f/hWC5+ISM1Ubvte+j/te07+AuzL9r2IUkZeqVKqarq+w/WO9xtEN+Chsx6qlOvccdodhAaGsnL/SpbsXVIp1/AUq3WvdZ3Wbg2RF9f1bloQSvnjTCnQXCmpmTZs2EBqaqrjNnr06DKPHz7c3J1u3ryC34EB6tUzXwulpBQ9/uBB8zHrmFN3wLM+Lu+YmBjztVHduubv3yUdU/gc5a2lLO3aef6Pgi7/n+blC18mMiSSdm+2o/n45jQf35z2b7anTngdXu3/qmdXJ37L+otxeaEUaK6UiEhNl5trvi2vUkrte+JtTeOaOgZNj7lgTKW1JdWNqMsNnW4AYMLSCZVyDU/ZfFTzpLytY0JHmsQ2ISworMQxKf6gZwOFUlLzREdHExMT47iV1rpnGGYgNXUq/PqrOYy8sG7dzM1efvml4L7Nm81h6GedZX581lnw119Fd8mbM8cMnJKSCo4pfA7rGOscISHmtQofY7ebH1vHOLOWsrz4Ijz0EMyfD0ePmq2DhW/ucHnCT2xYLAvvWMic7XNYc2AN4cHhdE7s7PgfutQMzlZKgUIpEZGaztmZUv7cvufs7ntq36t6vrr2KzYe2ch5zc6r1OsM6zGMj1Z/xFfrv2Js/7EkRiWW/yQf0JBz7wuwBTB/yHzSstOoF+VEqYIPqFJKpHTDhsHkyfDDD+bwb2s2U2ys+bohNhaGDoWRI81h4DExcN99Zgh05pnmsf37m+HTLbfAyy+b53jySfPc1muMe+6BCRPgkUfgjjvMAOyrr+CnnwrWMnKk2UbXvTv07Amvv27+7n777QVrKm8tZbnoIvPtBRcUvd8wzPlS+cX3xCuXW2OnbTYb/Vv2p3/L/u48XaoBK5QqrY2hMIVSIiI1W1mhlGEYBTOlqkD7niqlqp/EqESvBETdG3TnjIZnsGTvEt5b+R5P9Hmi0q/pDkelVF2FUt7kr217lm4NugGwI2UHRzKOUDeiro9XJOI/Jk403553XtH7P/zQnLsE8Npr5k5411xj/gFrwAB4662CYwMDzda/e+81A6LISDNc+u9/C45p3twMoB58EMaPN1sE33vPPJdl0CA4fBieesoMtrp2hZkziw4/L28tZZk3z8l/FBc4FUr9b8n/+Fe3fxEWFMb/lvyvzGNHnDHCIwsT/6ZKKRERcZYVSgUHF3/seNZxMvPMxKdhdEOnzqdKKamqhvcczpKpS5i0YhKPnvMoQQH+ty3xpiPmFk2qlJLC4sLiaFunLZuPbmbZ3mVc3PpiXy9JxG84s39FWBi8+aZ5K03TpjBjRtnnOe88WFXOfgPDh5u3iqylNOee6/pzyuPU/wlfW/waN3W6ibCgMF5b/Fqpx9mwKZSqIRRKiYiIs8qqlLKqpOpG1CU8uJTE5xSaKSVV1XVJ1zFy1kj2pO3hx80/cnX7q329pCJSs1I5eNKckKtKKTlVz4Y92Xx0M0v3LlUoJVJD/f572Y/3cWOqk1Oh1I77d5T4vtRcCqVERMRZZYZSLs6TAv9v37PmKoicKjQolLtOv4sX/niB1xe/zlXtrsLmR18sVute/aj6xITG+Hg14m96NuzJp2s/Zek+zZUSqalObVGEoq953Jkp5dLue7n5ubT8X0s2Ht7o+pWkWnEnlDp4UG0NIiI1UVmh1J60PY
DzO++Bb9r3rF1ny2vfg4LdBkVKcm+PewkNDGVB8gJ+2vJT+U/wIseQc1VJSQkKDzs3nOlXEpFq5/jxordDh8yZVT16wOzZ7p3TpVAqODCYrDzVpUvBi3NnQqk6dQr+grx3b+WtSURE/JMV0pTVvudKpZQv2/dK2+DDWhOohU/K1iimEQ+c+QAAD89+mNx8/0kxHUPONU9KStAlsQvBAcEcyTjCrtRdvl6OiPhAbGzRW926cOGF8NJL5q6A7nAplAJzO9uX/nyJPHuee1eUasGV3fdsNrXwiYjUZE617zm58x7456Dzwp+bQikpz+hzRhMfEc/mo5t5Z8U7vl6Og0IpKUtoUChd6nUBzGopERFLYiJs3uzec13e8mPZvmX8sv0XZm+bTafETkQGFy2V+W7Qd+6tRKoUV9r3wAyltmxRKCUiUhM5075X1WdKBQSYn19OjlrVpXyxYbE8c94z/HvGv3l6/tPc1Pkm4sLifL0ste9JuXo26MnyfctZuncp13e43tfLEREvW7u26MeGAfv3w4svQteu7p3T5VAqLiyOa5Kuce9qUm24E0qBQikRkZrICqWCg4s/ZlVKuTJTyh933wNzXTk5qpQS59zV7S7eWPoGG49s5IUFL/DyhS/7dD359nz+Pvo3oEopKV3Phj15a/lbqpQSqaG6djU7oU4dK3fmmfDBB+6d0+VQ6sMrPnTvSlKtuBtKJSdXznpERMR/lVYpZRhGQaVUFW/fg4Jh56qUEmcEBQTxav9XuWTyJYxfMp57u99L81rNfbae9YfXk52fTWhgKM3imvlsHeLfrGHnK/avIM+eR1CAy79OikgVtmNH0Y8DAiA+vuhsTVc5PVPKbth56Y+X6PVBL3q824PH5j5GZq4XXw2KX1GllIiIOKu0UOpIxhHHBioNoxs6fT5/bN+DghdkqpQSZ13c6mIubHEhOfk5PPbLYz5dizXbamDrgQQGBPp0LeK/2tZtS3RINBm5GdqRXaQGatq06K1x44oFUuBCpdTzvz/Pf377D/1a9CM8KJzxS8Zz6OQhPrjCzRotqdJc2X0PoEkT861CKRGRmqe0UMqqkkqITCA0KNTp83m7UspuL6h+UiglnmSz2Xi1/6t0ndSVr9Z/xf1n3M/Zjc/2+jrSstP4eM3HAAzvOdzr15eqI8AWQPcG3Zm3cx5L9y6lU2InXy9JRCrZ//7n/LEjRrh+fqdDqU/WfsJbA9/i7u53AzB3+1wumXwJ713+HgE2lzfxkyrOld33QJVSIiI1WWmhlGPnPReGnIP3Z0oVvo7a98TTOid2ZuhpQ3lv1XuMnDWSRUMXYbPZvLqGT9Z8womcE7Sv257zm53v1WtL1dOzYU9HKDX09KG+Xo6IVLLXXnPuOJutkkOp5NRkBrYe6Pi4X4t+2LCxL32fS8NJpXpwt33v+HHzuc4+T0REqr7cXPNtsVAq9Z9QyoV5UuD99j1nQylVSom7nu37LF+s+4Ile5cwZf0UBncc7LVrG4bBm8veBGBYj2FeD8Sk6rHmSi3dp2HnIjXBqXOkPM3pEqc8ex5hQUWbBYMDg8nNz/X4osS/GYbroVRMjHkD2LOnctYlIiL+qbz2vUbRrv1xy9vte9Z1QkIgsIxRO6qUEnfVi6rHY+eYM6Uem/uYY9aaN/y641c2HdlEVEgUt3S5xWvXlarLCqX+OvgXGbkZXr9+bn4ubyx5gyHfD2H9ofVev76ImAyj+C587nC6UsowDG774TZCAwtmPmTlZXHPT/cQGVyQTHw36LuKr0r8Wna2OV8DXKt4ql0b0tIgJaVSliUiIn6q1FAq/Z9QysWKa1+175VVJQWqlJKKGXnWSCYtn8Su1F2MXzyeR8951CvXnbBsAgBDugwhJjTGK9eUqq1hdEPqR9Vn/4n9rD6w2qtz0ObtmMd9P9/H+sNmGPX52s8ZccYInj73aWLDYr22DpGa7JNP4JVXYMsW8+M2bWDUKLjFzb9rOF0pNaTrEBIiE4gNi3
Xcbu58Mw2iGxS5T6o/q0oKXAulYv/58khN9ex6RETEv1mhVHBw0fv3p+8HoEF0A5fO56tKKYVSUpkigiMYc8EYAF744wUOnTxU6ddMTk3mx80/AmbrnogzbDYbPRr2AGDpXu+08O1J28PgbwbT95O+rD+8nroRdRnQcgD5Rj6vLX6NthPa8smaT7Abdq+sR6SmGjcO7r0XBg6Er74ybxddBPfc4/zsqVM5XSn14RUfuncFqXasnfdCQiDI6a8ghVIiIjVVaZVS+9L3AVA/ur5L5/PVTKnyQim170lF3dT5JsYvGc+K/Sv4z/z/8NYlb1Xq9SYtn4TdsNO3eV/ax7ev1GtJ9dKzQU9+3PxjpYdS2XnZvL74dZ79/VlO5p4kwBbAvd3v5b/n/5fa4bWZtXUWI2aO4O+jfzPk+yG8veJt3hz4Jl3rda3UdYnUVG+8ARMnwq23Ftx3+eXQoQP85z/w4IOun1Pb5onLXJ0nZVEoJSJSM5UWSu0/4V6lVOGKJE/MMiiPKqXEWwJsAYztPxaAd1a8w4bDGyrtWll5Wby78l0AhvcYXmnXkerJMey8EkOpWVtn0XlSZx775TFO5p6kV+NerPjXCiYMnEDt8NoADGg1gL/u/YuX+r1EZHAkC3cvpNs73Rj20zCOZR6rtLWJ1FT798PZJXTsnn22+Zg7FEqJy6xQKiLCtecplBIRqZlKCqUyczNJyUoBoH6Ue5VS4J0ASJVS4k3nNjuXK9tdSb6Rz6g5oyrtOl+v/5ojGUdoHNOYy9peVmnXkeqpe4PuAGw7vo2jGUc9eu6dKTu5aspVXPT5Rfx99G8SIxP55MpPWHD7ghIroEICQ3ik1yNsGr6JwR0HYzfsvLX8Ldq80YZ3V7xLSlYKqVmpZd5y8nM8+jlUd4Y3/iIkfqlVK7Nl71RTpkDr1u6d04XmKxGTKqVERMQVJYVSVpVUaGAocWFxLp3v1FCqvLCooqy2dVVKibe83O9lpv89nRlbZvDdxu+4uv3VHr+GNeD8nu73EBSgXwnENbXCa9GmThv+Pvo3y/ctZ0CrAR457++7fufizy8mIzeDQFsg959xP0+f97RTQ/gbxTTii2u+4O5udzN8xnDWH17Pv6b/i39N/1e5z40IjuDPO/6stm1/ry16jcd+eYzvrv+OS9pcUqFzvbPiHUb8PILvBn3HwNYDPbRCqSqeeQYGDYLff4devcz7/vwTfvml5LDKGaqUEpe5G0rF/PP/EoVSIiI1S26u+bZIKFVoyLnNZnPpfEFBEPDPKxhvDDu3rlFehbBCKfGU1nVaM/LMkQDcMvUWVh9Y7dHzL9u7jKV7lxISGMKdp9/p0XNLzdGjgWeHnW89tpWrplxFRm4G5zQ5hzX3rGHsgLEu7wp5XrPzWHX3Kl4f8Dq1wmo59ZyM3AxeXfiqO8v2e3vS9vDEr0+Qk5/D8J+Hk5Xn/v+kDp08xMOzHyY735z1JTXHunXm22uugSVLoG5d+P5781a3LixdCldd5d659WcRcZkqpURExBVlVUq5OuQcwGYzq5ZOnvRuKKX2PfGm5/o+x8oDK5m7fS6XfXEZS+9c6tb3S0neXPYmANd3uJ6EyASPnFNqnp4Ne/L5X5+zdF/FQ6mUrBQu++IyjmUeo0eDHsy+eTbhwe6XwQYHBnP/mfdz3xn3kZufW+axqw+s5sz3z+Sr9V8xtv9YEqMS3b6uP3ri1yfIzDP/R7YzZSdvLHmDUb3caw1+et7TpOekA/Drjl85fPIw8ZHxHlur+K/OnaFHD7jzThg8GD77zHPnVqWUuEyhVM02YwYMGAC7d/t6JSJSVVihVHBwwX2OnfdcnCdl8eYOfBp0Lr4QHBjM19d9Tds6bdmTtocrp1xJZm7FU9gjGUf4ct2XgAacS8UUHnZekRlDefY8rv/6ejYd2USjmEb8MPiHCgVShQXYAggNCi3zdkajMziz0Znk2nMdw/+rixX7VvDJmk8AGNFzBADPLXiOwycPu3yu9Y
fW887KdwBIiEwg38hn6qapnlus+LXffjN32HvoIahfH267DRYs8My5FUqJy6zZGgqlaqY33oDZs+Gjj3y9EhGpKkqslEp3b+c9ixUQ+VOllEIp8bS4sDim3zid2uG1Wbp3Kbf/cHuFBwy/v/J9svOz6d6guyNUEHFH13pdCQoI4tDJQySnJrt9nvt/vp852+cQGRzJtBumeawi0BXDegwDYNLySeTZ87x+/cpgGAYPzX4IgJs738xrF73GafVOIy07jWd+e8bl842aMwq7Yefq9lc72ou/Wu/mECGpcnr3hg8+MHfYe+MN2LEDzj0X2rSBl16CAwfcP7dCKXGZdt/zL4t2L2JP2h6vXW/PP5datMhrlxSRKq7M9j03K6WsAMifQim170llaFW7Fd9d/x3BAcFMWT/FrV8mLfn2fN5a/hZg/hLu6jw3kcLCgsLoktgFgGX7lrl1jglLJ/DW8rewYePzqz/32aDx65KuIz4inr3pe/lh0w8+WYOn/bD5B37b9RthQWG80PcFAmwBjO0/FjDDt42HNzp9rtnbZvPz1p8JDgjmpX4vcV2H6wCYt3Meh04eqpT1i3+KjITbbzcrp/7+G667Dt58E5o0gcsvd++cCqXEZWrf8x+rD6ym1we9GPTNIK9dc5/ZccPixWC3e+2yIlKFlRRKOdr33PyLuNr3pCY5t9m5TLp0EgDP/PYMX/z1hVvnmf73dJJTk6kTXodBHbz32kGqr8ItfK6auXUm98+8H4AX+73IFe2u8OjaXBEaFMq/upm79Fk7U1ZlOfk5PDLnEQAeOushGsc2BuD/2bvv+KbK7w/gn6SbbkYnlFVWmWXKBkVABGUJCC5AkaX+ZKj4Rdyi4gAVQRTFgQoioOylbJBZVpml0JYuKN27yf39cXh6051xs9rzfr36SmiTm6dtWno/Oec8/Rv3xyMtHoFG0uCV3a/odSyNVlNccTWz60yE1g5FE98m6BzUGVpJi/UX15vnk2A2LzQUeP11YP58wNMT2LLFuONwKMUMxqGU7TgYcxASJBy7dQwFmgKzP15uLnD3Ll1PTaV0nDHGqlJZpVR1at/jSilmTpPCJ2FO9zkAgIl/TcTRuKMGH0MMOH+247OKzexhNZuxO/BF3o7E2HVjoZW0mNhhIub2MG7wtpKe7/Q81Co19t7YiwvJF6y9HJMsO74MV+9ehb+7P17t+WqJj3084GM4qh2x+cpm7Lm+p8pjfX/6e5xPPg9fV1/M7zO/+P1jwsYA4Ba+mmr/fporFRAAzJ0LjBwJHDpk3LE4lGIGUyKUMnEcArvnTOIZADQg8mrKVbM/XkJCyX8fNfzvYcZYDaPRyL/zy5spVZ3a97hSipnbhwM+xCMtHkG+Jh/Dfx9u0ByfS3cuYdf1XVBBhamdp5pxlawmEZVSJ+JPQKPV6HWf29m3MfTXocjIz0Cfhn2wfOhym2glbeDdAMNbDgcgB7j26G7u3eI233f7vwtPF88SH29RtwWmdZ4GAJi9c3al37fM/EzM/5eCqDf7vonabrWLPyZa+Pbd3IfELBMGCjG7ER8PfPABzZHq1w+4dg344gt6/7ffAvfdZ9xxOZRiBjM1lCos5D/YlRKRFFF8/cJt87+ic+tWyX/zXCnGWFUKdIo4RSiVX5SPlNwUAKa371kilBIbfFQ1S5FDKWZuDmoHrB65Gu392yMpOwnDfhuGzPxMve779XGaJTWsxTA08mlkxlWymqRl3ZbwcPZAdmE2Lt6pekZRflE+Rq4diei0aDTxbYI/x/wJZwfnKu9nKWLg+U9nfkJ6nn22d7y3/z2k5qWijV8bTAqfVO5t3uz7JnxcfXAm6Qx+PPNjhcf66NBHSM5ORmjtUEzrMq3Exxr5NELX4K7cwldDPPQQ0LAhDTkfMQK4eBE4eJDmSxmaC5TGoRQzmLG773l4AOJFEG7hM12Rtgjnks4V/9sSZcYilFLf+83BoRRjrCrlhVLiFVUntRPquNUx6ri2OFOK2/
eYJXg4e+Dvx/+Gv7s/ziadxYT1E6qsUMnMzyw+8RQn3YwpwUHtgM5BnQEAx29VPuxckiQ8v/l5HIw5CG8Xb2x+fDPq1qpriWXqrX+j/mhVtxWyC7Px05mfrL0cg127ew1fHaOZWJ8O/BQOaodyb1enVh3M700VUPP/mY+sgqwyt4lNj8WnR2gw+qIHF5UbHorZdNzCV/05OQHr1tGmVx99BLRoodyxHZU7FKspjN19T60GvLwokEpPp/5TZrzLdy4jXyOf+ViiUkoMOe/TB9i7Fzh/HsjIoO8rY4yVRzeUcnKiy+Kd9zwDjW7Z4PY9VpOFeIfgr3F/oe+qvth0ZRO6fdcNPq4+Fd7+bu5dZORnoHmd5hjQZIDlFspqhK5BXbH3xl68s/8drD63usLbZRdm42jcUTioHLD2sbVoVa+VBVepH5VKhZldZ2LG1hlYenwpZnadaROthfp6dferKNQWYnDoYAxsOrDS287sOhNfn/ga11OvY9GhRXi7f8mdPV//53XkFeWhT8M+eLRF+UPoR4eNxuyds7H/5n4kZCYYXf3MbN/ff5vv2BxKMYMZ274HUAufCKWYaSISIwBQpUGhttCi7XudOwPXrwMxMcDx48ADD5j9oRljdkqEUo6OcrVs8c57Rs6TAnjQOWPd6nfDj8N/xLg/x+Fkwkm97vNi1xehVnGjBFNW/8b98fHhj3Ej7QZupN2o8vZLBi+pMjCxpifbPYnXdr+GyymXsSd6j90Euftv7sf6i+uhVqnxyYOfVHl7F0cXfDTgIzz2x2NYdHgRpnSagmCvYABU9fbL2V8AAJ8N/KzCYC7EOwTd63fHkbgjWBe5Di90e0G5T4jVGFYNpfbf3I9FhxfhZPxJJGQlYMPYDcXD5Sqy+uxqfHz4Y1xNuQpvV288FPoQFj24CHVqGVf+zwxnaigFcCilBBFKPdz8YWy8tBFXU64ivygfLo4uZntMEUoFBQHdu1ModeQIh1KMsYqVu/Nepmk77wG22b7HlVLM0sa2GYtW9Vrp1cLv5eKFh5o9ZIFVsZpmUNNB2P3kbiRnJ1d529DaoegS3MUCqzKep4snnm7/NL46/hW+OvaVXYRSWkmL2TtnAwCe6/gcWvu11ut+o1qNQs8GPXEo9hD+98//sGr4KkiSVHysJ9s9iU5BnSo9xpjWY3Ak7gjWRq7lUIoZxaqhVHZBNtr7t8ekDpMwcu3IKm9/KOYQntr4FD4f9DmGNR+GW5m3MHXzVDy36TmsH8vD1SyFQynbIIacDwkdgn+j/0V6fjoup1xGO/92ZntM0b4XHEyh1Jo1PFeKMVa5ckOpLNN23gNss1KKQylmDe3825n1/37GqqJSqfBAk+r1CuWMrjPw1fGvsOnKJtxMu4mGPg2tvaRK/XruV5yIPwFPZ0+83e/tqu9wj0qlwmeDPkO377rhpzM/4cVuL+JG2g0ciDkAN0c3vH//+1UeY3TYaLy842UcjDmIWxm3iqutGNOXVet3H2r2EN67/z2MaDVCr9sfiTuCRj6N8GK3F9HYtzF6hfTC852ex7Fbx8y8UqaLQynrkyQJZxLPAADCA8OLXw0x97BzUSklQikAOHpU3u6dMcZKq6xSypTZE7Y4U4rb9xhjrHpoWbclHmj8ALSSFstPLLf2ciqVU5iDeXvmAQDm9ZoHfw9/g+7fNbgrxrcdDwkSXt7xMl7Z9QoAYHb32Wjg3aDK+9f3qo+eDXoCANZFrjNw9YzZ2e573et3R2x6LLZe3QpJkpCUlYR1F9dhSLMh1l5ajWLs7nuAHEplZCi3npooISsBt3Nuw0HlgNb1WqN1vXuhlBnnSkmSXCkVFAR06EAnYHfvAlevmu1hGWN2rrCQLnVDqfgs+mXC7XuMMcZs1cyuMwEA3576FnlFtvuL/fMjnyMuIw4h3iH4v/v+z6hjLHxgIVwdXbH/5n5EpUYhwCMAr/Z6Ve/7j2k9BgCwNpJ34WOGs6tQqmdIT6weuRpj142F83vOCP
g0AN4u3lg6ZGmF98nPz0dGRkbxW2ZmpgVXXD1xpZT1iXlSLeu2hJuTm0VCqdRU+UQrKIhOMDvdazHnFj7GWEUqrZSyk/Y98WKMvqFUYSGg1Zp3TYwxxsxraPOhaODVACm5KVh7wTbDlispV/DhoQ8BULDk5lTFf1QVCPEOwcv3vVz873f7vwsPZw+97z86bDRUUOFw7GHEpscatQZWc9lVKBV5OxIvbX8JC/oswMkpJ7F9wnbcSLuBqZunVnifhQsXwtvbu/gtLCzMgiuufrRa+QSgVi3D78+hlDJEKNU+oD0AWKR9T7Tu1akjt6iIFj4OpRhjFRGhlJOT/L7imVJ21r5X1f97Ljr7THALH2OM2TdHtSOmdZ4GAPjq2FdWXk1Zd3PvYuivQ5FVkIXeIb0xrs04k473Wq/XEB4QjoFNB2Jih4kG3TfIMwi9G/YGwC18zHB2FUotPLgQPUN6Ym7PuWjn3w6DQgfh64e/xvcR3xe/6lravHnzkJ6eXvwWGRlp4VVXL+LVYoArpaxJhFId/DsAQHGlVFRqlNnKi3WHnAscSjHGqlK6UqpQU1i8Q5M9tO9ptXLApG+lFMAtfIwxVh082/FZODs443j8cZuaY1yoKcRjfzyGq3evIsQ7BH889gfUKtNO7b1cvHDq+VPY8cQOOKgdDL7/mDBu4WPGsatQKqcwp8wPm4OKfmAklD9p2cXFBV5eXsVvnp6eZl9ndSZa94Cq/zgvj5cXXXIoZZriUCqgAwAgwCMAvq6+0EpaXLpzySyPqTvkXBCh1PnzAHfGMsbKUzqUSspOAkCvQNetVdfo41qqfU83XKrq/z1HR0CloutcKcUYY/avnns9jG09FgCw9HjFI2MsSZIkzNw6E/9E/wMPZw9sfnyzwcPNzWFU2CiooMLRuKO4mXbT2sthdsSqoVRWQRYiEiOKT7CjU6MRkRiBmPQYAMC83fPw1Ianim8/rPkwrL+4HsuOL8P11Os4FHMIL25/EV2Du5r0aivTnwilatUC1EY8e7hSynRZBVm4dvcaALl9T6VSmb2FT3fIuRAUBISEUCXB8eNmeVjGmJ0rHUqJymZ/d3+TXtW1VPue7vGrCqVUKh52zhhj1Y0YeP77+d9xO/u2lVcDLD66GCtOrYAKKvw26je09W9r7SUBoBfJ+zbqC4Bb+JhhrBpKnYg/gfBvwhH+TTgAYNbOWQj/JhwL/l0AgGZOiIAKAJ7p8Aw+G/gZvjr+Fdp83QaP/fEYWtRpgfVj1ltl/TWRKUPOAQ6llHAu6RwkSAjyDIKfu1/x+8097Ly8SikAuO8+uuQWPsZYeUqHUvGZpu+8B1iuUkoc38kJcNCjm4FDKcYYq166BndFl6AuKNAUYMXJFVZdy5YrWzB752wAwCcDP8HQ5kOtup7SRAvfmgtrrLwSZk8crfng/Rr1g/Rm+W13ALBq+Koy73uh2wt4odsLZlwVq4yYKcWhlPWUbt0TLBVKBZU6j+zeHVi7lkMpxlj5CgvpsrhSSoEh54DlZkqJUErflnUx7Jzb9xhjrPqY0WUGnvnrGSzYuwDxmfF49/53UduttkXXcC7pHMb9OQ4SJDzX8bkSu+XZipGtRmLmtpk4Hn8c0anRaOzb2NpLYnbArmZKMevTbd8zBodSpiveec+/fYn3W6p9r3SllJgrdfQoIFWcMTPGaqiK2vcCPZQJpSxVKaVvKMWVUowxVv2MbzseT7V/ClpJi69PfI3mXzbHtye/hUarscjjJ2UlYdhvw5BVkIX+jfpj6ZClUIkhhjbE38Mf/Rr1AwD8EfmHdRfD7AaHUswg3L5nfRFJEQAqrpS6nnodOYU5UFpF7Xvh4VQZkJICXLum+MMyxuycudr3LDVTSlQIc6UUY4zVXE4OTvhx+I/49+l/0bpea6TkpmDK5im4b+V9+C/uP7M+dl5RHkasGYGb6TfRrHYzrBuzDk4OTmZ9TFMU78J3gXfhY/rhUIoZRKlQKj+f/2A3hkarwbmkcw
DKhlJ+7n6o41YHEiTFd+ArLASSaMOsMu17zs5Ap050nVv4GGOliVDK6d7fz8XtewpVSlmqfU/fCmGulGKMseqrX6N+OP38aSwetBheLl44EX8C9628D5P/mozk7GTFH0+SJEz+ezKOxB2Bj6sPNo/fbPG2QUONbDUSDioHnEw4iai7UdZeDrMDHEoxg5gaSnl6yte5WspwV+9eRW5RLtyd3NHUt2mJj5lzB76kJGrNc3IC6tUr+3Eeds4Yq0iZ9j2FZ0px+x5jjDFLcnJwwkv3vYQrM6/g6fZPAwC+j/geLb5qga+OfYUibZFij/X+gffx67lf4ah2xLrH1qF5neaKHdtc6rnXw/2N7wfALXxMP1YddM7sj6mhlIMDBVOZmRRK+flVfR8mE/Ok2vm3g4O67DZQreu1xv6b+xUfdi5a9wIDAXU5UbaYK8WhFGOsNHO372k0VM3pZKZOBh50zhhjrDz+Hv5YNXwVpnSagplbZ+J04mm8sO0FLD66GMFewVUfoApaSYuDMQcBAEuHLMUDTR4w+ZiWMqb1GOy6vgtrL6zFa71es/ZymI3jUIoZxNTd9wBq4ROhFDNMRTvvCebagU8MOS/duieIUOrcOSArC/DwUPThGWN2TDeU0mg1xe0NSrXvAVSVZCuhFFdKMcZYzdKjQQ8cf+44vj31LV7f8zqiUqMQlapc29r/dfs/TOk0RbHjWcKIliMwdfNUnE48jWt3ryG0dqi1l8RsGIdSzCCm7r4HUCgVF8ehlDEq2nlPMFf7XkVDzoXgYKBBAyA2Fjh+HOjfX9GHZ4zZMd1QKjk7GVpJC7VKDT9300plRfgDUHCk2x6uJK6UYowxVhUHtQOmdp6KMa3HYN+NfYq18Pl7+KN3SG9FjmVJdWrVwdIhSxEeGF5m5AhjpXEoxQxiavsewDvwmULfSqnotGhkFWTBw1mZkiURSlVUKQXQXKnYWGrh41CKMSYUFtKls7Pcuufv7l9uC7IhVCoKgPLzzTtXiiulGGOM6au2W22MaDXC2suwCc93ft7aS2B2ggedM4NwKGU9iVmJSMpOglqlRlv/tuXepp57veLqg4u3Lyr22KJ9r6JKKYDnSjHGyqdbKaXUkHPBEsPOOZRijDHGGDMfDqWYQTiUsp4ziWcAAM3rNEctp4r7J80xV6qq9j1ADqWOHqWd+hhjDCgVSmXeC6VMnCcliKDInAGQmKXI7XuMMcYYY8rjUIoZhEMp66mqdU8oDqUUnCtV1aBzAAgPp5POO3eAKOVmOzLG7JwIpZyclNt5TxBVSZaolNJ3liJXSjHGGGOM6Y9DKWYQDqWsJyIpAkDFQ86F4mHnFq6UcnEBOnWi69zCxxgTym3fU7hSyhbb97hSijHGGGOsahxKMYOINgYOpSzP4EophUKprCwgI4OuV1YpBdCwc4BDKcaYzBIzpcxZlWTs7ntcKcUYY4wxVjUOpZhBRKWUvm0M5eFQynDZBdm4fOcyAD1CqXuVUjHpMcjMzzT5sUXrnqdn1Vuu87BzxlhpuqGUaN+rCZVSHEoxxhhjjFWNQylmEG7fs47zyechQYK/uz8CPAIqvW1tt9rFt4m8HWnyY+vTuieIUOrsWfm5whir2cobdG6PM6V40DljjDHGmPI4lGIG4VDKOs4k0c57VVVJCUq28IlQqqrWPQCoX5/etFrg+HGTH5oxVg0UFtKlo5MWiVmJAKp3+x5XSjHGGGOM6Y9DKWYQJUIpLy+65FBKf/rOkxKU3IFPtO/pUykFcAsfY6wkUSmVJ2VAI2mgggr+7v6KHJvb9xhjjDHG7BuHUswgSlZKieHZrGoilKpq5z1ByR34DGnfA3jYOWOsJBFKZRWlAgDqudeDk4OTIse2RPue2OCD2/cYY4wxxpTHoRTTW2EhUFRE17l9z3I0Wg3OJp0FYJ32PVEppU/7HiBXSh09CkiSyQ/PGLNzIpTKLEoBoNyQc8Cy7Xv6bvDBlVKMMcYYY/rjUIrpTXdwtRK77+
XmyrNGWMWiUqOQXZgNN0c3NK/TXK/7iEqpuIw4pOeZlv4ZWinVsSMNNL59G7h+3aSHZoxVAyKUytDcAaDcPCnANtv3uFKKMcYYY0x/HEoxvYlQysGBQgdjiZlSAFdL6UO07rX1bwsHtYNe9/Fx9Sne3crUHfgMGXQO0AlZx450nVv4GGMilEorTAYABHkos/MeYJu773GlFGOMMcaY/jiUYnrTnSelUhl/HEdHuf2PQ6mqnUm8t/OefweD7qdEC59WCyTQDu56V0oBPOycMSYrDqUKKJSq7pVSHEoxxhhjjOmPQymmNyWGnAs8V0p/EUkRAPSfJyUosQPfnTvUYqlSAQEB+t+Ph50zxgTRpn23MBGA/c6U4vY9xhhjjDHlcSjF9MahlHUU77wXoN/Oe4ISO/CJIed+foCTAZtldepElxcv8rBzxmo6USmVkk9ll6K1WAnmrpSSJDnw4kopxhhjjDHlcSjF9Ca2xeZQynKSs5MRnxkPFVRo69fWoPsq0b5n6JBzoX59uszLA1JSjH54xlg1IEKpOwWUcivZvmfumVK6wZKhoRRXSjHGGGOMVY1DKaY3USllys57AodS+hHzpEJrh8LTxdOg+4bVCwMAxGfGIzU31ajHNzaUcnEB/P3pelycUQ/NGKsmRCh1O/deKGVH7Xu6YZeh7XtcKcUYY4wxVjUOpZjeuH3P8kTrnqHzpADA29UbDbwaADC+Wkq07+m7854uUS0VG2vUQ9c4kiShUFNo7WWwSmi0GmsvwS6JUKpIRf+JBHgYMKCuCuZu3xMVwo6O9KYPbt9jjDHGGNMfh1JMbxxKWd6ZpHs77xkRSgE6c6WMHHZubKUUADSgPIxDKT1oJS06f9sZrZa2MrqqjZnX5TuXUefjOmi/vD3239xv7eXYDUmSB53DoQB13OrAxdFFseObu31PHNeQCmFRKaXVAkVFyq+JMcYYY6w64VCK6Y1DKcszpVIKMH2ulAilTKmU4va9ql27ew2nEk4hKjUK7x9439rLYeVYeXol0vPTcTbpLPqu6osJ6ycgPjPe2suyeYW6xX8OBYrOkwIs176nb+seIAdlAFdLMcYYY4xVhUMppjcOpSwrtzAXl+5cAgC09zds5z3B1FBKtO9xpZR5Hb91vPj6F/99gai7UVZcDStNkiSsvbAWANCvUT+ooMKv535Fi69aYNGhRSjQFFh5hbarQPdL41Cg6M57gPnb94wJpVx0CsF42DljjDHGWOU4lGJ641DKsi7cvgCNpEHdWnWNPpHj9j37cOzWseLrhdpCvLbnNSuuhpV2PP44bqbfhLuTO7aO34oTU07gvvr3IasgC6/sfgXtl7fHrqhd1l6mTSpTKaXgkHPAcu17hoRSDg7y/CmulGKMMcYYqxyHUkxvYuAr775nGbqteyqVyqhjiB34krKTkJKTYtB98/OBO3foOrfvmdexeAql5vWaB7VKjXWR63Aw5qCVV8UEUSX1SItH4Obkho6BHXFo0iH88OgP8HP3w6U7lzDwl4EYvXY0YtJjrLxa21JcKaXSAmqN4qGULbbvATzsnDHGGGNMXxxKMb1V10qpJUuAoUOBzExrr6Sk4lDKv4PRx/Bw9kBD74YADG/hS0igSxcXoHZtwx9bVErFxdGwY1a+Ak0BTiecBgBMCp+EyeGTAQCzd86GVtJac2kMJVv3xrQeU/x+tUqNZzo8g8szL+Olbi/BQeWAPy/+iZZftcSHBz+ExE96AHIopXbQACrUiPY9QG7h4/Y9xhhjjLHKcSjF9FYdQymtFnjzTWDLFmD9euuuRZckSdh3cx8A44ecC8a28OkOOTemUEvcLz8fuH3b8PvXFOeSziFfkw9fV1809W2Kd/q/Aw9nDxy7dQy/n//d2sur8f679R9iM2Lh4eyBwaGDy3zcx9UHiwcvxunnT6NPwz7ILcrFvD3zsO3aNius1vYUV0o5UB+f0oPORUVSfj79PlcaV0oxxhhjjJkXh1J2JifHehU91T
GUunJFXsMuGxoJcyj2EM4nn4eboxseavaQSccydti5KUPOAcDZGfD3p+vcwlcxMU+qa3BXqFQqBHgE4LWeNFNq3p55yC00UwkI08ua82sAAI+2eBSujq4V3q6tf1vsfXovnmz3JADwjKl75FCKSobM1b4HmCcAEm3rHEoxxhhjjJkHh1J2ZM4cCoQ+/NA6j18dQ6mjR+Xru3eb55V2Yyw9vhQAML7teNR2M6J3ToexoZQpQ84FHnZetePxtPNe1+Cuxe+b1X0WGng1QEx6DBYfXWyllTGtpMUfkX8AKNm6VxGVSoUhzYYAAPbH7Dfr2uyFCKW0akpnzNW+B5gnABKVUobOUuT2PcYYY4wx/XAoZUfq1KFLUcFiaUqGUl5edJmTU2p3Jgv77z/5elIScO6c9dYiJGQmYF3kOgDAjC4zTD6ese174nlmzJBzgUOpqulWSgluTm744IEPAAALDy5EUlaSVdZW0x2JPYJbmbfg5eKFgU0H6nWf3iG9AdBMuIz8DHMuzy7IlVJ0Ren2PUdHeac7c8yV4vY9xhhjjDHz4lDKjoiKFVHBYmmijUHJSikAyLDieZuolBKfky208H176lsUaYvQo0EPhAeGm3y8VnVbAQBu59zG7Wz9hzspUSnFO/BVLjM/E5G3IwEAXYK6lPjY+Lbj0TmoMzILMvHm3jetsbwaTww4r6p1T1ewVzCa+DaBVtLicOxhcy7PLhS/6OBQAB9XH72/joYQAZAthlJcKcUYY4wxVjkOpeyIqFixViglKqUMbWMoj5OT/Ee+tVr4srOBs2fp+rRpdGntUKpQU4jlJ5YDAGZ2manIMd2d3dHYpzEAw1r4uH3P/E4mnIQECSHeIfD38C/xMbVKjc8GfgaAgkpDK92YaQxt3dPVp2EfAMCBmwcUX5e90a2UUrp1TxD/l5izfc/Y3fe4UooxxhhjrHIcStkREQ5Uh/Y9wPpzpU6epBlSwcHAM8/Q+/bvt+5JxIZLG5CQlQB/d3+MChul2HHbB7QHQO1I+uL2PfMrr3VPV++GvTGy1UhoJS3m7JpjyaXVeIdiDiEhKwHeLt54sMmDBt1XtPDxXKmSoZTSQ84FERjZYqUUh1KMMcYYY5XjUMqOiFAqLU1upbOk6hZKida9++4DwsIofMnLAw4etM56AHnA+ZROU+Ds4KzYccVJ9Y6oHXrdXpK4fc8SioecB5UfSgHARwM+gpPaCduvbceOa/p9/5jpROve8JbD4eLoYtB9RaXUsVvHkFdUs1OJ4lBKXaj4PCnBFtv3eNA5Y4wxxph+OJSyI56eciBk6RY+SVJ2phQgh1LWmiklhpx36waoVMCD94ohrNXCdzbpLPbf3A8HlQOe7/S8osce1HQQAOBQ7CG9hi+np8vfbyUqpeLibGdnQ1tSVaUUAITWDsXMrtTKOWfXHGi0GousrSbTaDVYd5E2GzC0dQ8Amvo2RYBHAAo0BcXf45qqRPueR81p3+NKKcYYY4wx/XAoZUdUKusNO8/NpWAKqJ6VUoAcSu3caZ31LD1GVVIjW41EsJcJ5UnlaFq7KUJrh6JIW4R/ov+p8vaidc/X1/CTMV2BgYBaTcOOk5ONP051lJiViJj0GKhVanQK6lTpbd/o8wZqu9XG+eTz+P709xZaYc11MOYgErMS4ePqgwFNBhh8f5VKxXOl7inRvmemSilztu+JcJ4HnTPGGGOMmQeHUnbGWnOldNsFlRh0Dlg3lIqLo6+hgwPQ6V4eMODeuWdEhOUDlLS8NPxy7hcAwIwuM8zyGIObDgYAbL+2vcrbKtG6B9BA+4AAus4tfCUdv0Wte63qtoKHs0elt/V188WCPgsAAPP/nY/M/Eyzr68mE617I1qOMLqNludKEUvMlLJE+56h/+/xoHPGGGOMMf1wKGVnrLUDn5gn5eJCQY4SrBlKiSqpdu3kkw1/f6A9zQPHnj2WXc+qiFXIKcxBG782xRUWShscSqHUjqgdkETZWwWUGHIu8LDz8u
nTuqdrWpdpaFa7GZKzk7H46GIzrqxmK9IWFbfujW091ujjiJ/jw7GHUaQtUmRt9siSu+/Z0kwpbt9jjDHGGNMPh1J2xlrte0oPOQdsI5QSrXuCNVr4tJIWXx//GgAws8tMqFQqszxOv0b94OzgjBtpN3Al5Uqlt1WqUgrgUKoixUPO9QylnB2csaAvVUv9fPbnKoNFZpz9N/cjOTsZtd1q4/7G9xt9nDZ+beDj6oOsgixEJEYot0A7U1Bw73lqgfY9W5opxYPOGWOMMcb0Y9VQav/N/Rj22zAEfRoE1dsqbLy0sdLbP7PxGajeVpV5a/11a8ss2AZYq32vuoVSukPOdQ0cSJe7dskztMxtV9QuXL17FV4uXpjQboJZHiMiAlj0gTt6BNBJdlUtfCKUUqJSinfgK0uSJIMrpQDg0RaPwtXRFVfvXsWZpDPmWl6NJlr3RrYcCScHJ6OPo1ap0SukF4CaPVcqM/deKmPG9j2ulGKMMcYYs19WDaWyC7LR3r89lg5ZqtftlwxegoTZCcVvsS/HorZbbTwW9piZV2o7rN2+Vx1CqcJC4MQJul66UqpXL3qF+9Yt4NIly6znq+NfAQAmdphY5WwhY82cCbz9NuB2Yh4AYHtU5aGUCD25Uso8olKjkJqXChcHF7T1a6v3/TxdPDGk2RAAcnjClFOkLcKfF/8EYNyue6XxXCkgJZN2+3RyAtydFfwPRIclZkrxoHPGGGOMMfOwaij1ULOH8N7972FEqxF63d7b1RsBHgHFbyfiTyA1NxUTO0w080ptB7fvme7cOXr12scHaNas5Mfc3IDedB5pkRa+6NRobLmyBQAwvct0szxGXh5wnDrFcH5HN0AC9t3Yh9zCis/guH3PvESVVMfAjgZX44g5R2svrOUWPoXtvbEXd3LuoI5bHfRv3N/k4+nuwFdTv1cpWVkAgFqujmZ7DFtu3+NKKcYYY4yxytn1TKmVp1diQJMBaOjT0NpLsRjd9j1LnuNUp1BKzJPq1g1Ql/MToNvCZ27LTiyDBAkDmw5E8zrNzfIYJ07Iw4Zjb7igbvJo5Bbl4kBMxS1FSg465/a9skQo1SWoi8H3fbjZw3BzdENUahROJ55Wemk1mqg+G9VqFBzVpocoHQM7ws3RDSm5Kbh456LJx6vI5iubcSH5gtmOb4rULPrPw93NuF0M9cHte4wxxhhj9stuQ6n4zHhsu7oNz3Z8ttLb5efnIyMjo/gtM9O+t1IPCKDLwkLgzh3LPW5ODl0aui12ZWwhlCqPGHa+d6/OzlFmkFuYi5WnVwKgAefmcvBgyX97R84CUPFcKY0GSEyk60pWSt26Rcdmhu+8p8vd2R1Dmw8FwC18SirUFGL9xfUAlGndA2g4ffcG3QGYb67UoZhDGPbbMPRd1RdpeWlmeQxTpGfTfx6eZgylzNm+J/7v40HnjDHGGGPmYbeh1I8RP8LH1QfDWw6v9HYLFy6Et7d38VtYWJhlFmgmzs6Anx9dt2QLX3WqlBJDzkvPkxLatQPq1aPP+cgR863j9/O/427uXTT0blg8J8gcDh2iy/Hj6TLmSFcg16fCUCopicIjBwf5uWaKwEA6VlERHbumK9QUFlc4GRNKAXJowi18yvn3xr9IyU1BvVr10LdRX8WOa+65Ur+d/w0AkJKbgvf3v2+WxzBFeg4lRV61DEx1DGCu9j1Jko9p6AsyXCnFGGOMMaYfuwylJEnC9xHf48l2T8LZofJXX+fNm4f09PTit8jISAut0nyssQNfdQml7t4Frlyh610ryAPUarlaylwtfJIkFQ84n95lOhzUDmZ5HK0WOHyYrr/0EtC+PVBY4ADVuSdw8c5FxKTHlLmPeF4FBFCYZCoHBwqmAG7hA4DzyeeRV5QHH1cfhNYONeoYQ5oNQS2nWohOi8bJhJMKr7BmUrp1TxBzpfbf3K94gKjRarAucl3xv7849gWup15X9DFMlXFv9z3vWq5mewxzte/pBkrcvscYY4wxZh
52GUrtu7kP1+5ew+SOk6u8rYuLC7y8vIrfPD09LbBC87LGsHNzhlJZWZZr6zpGXVNo1gyoU6fi25k7lPrv1n84lXAKro6umBxe9fPYWJcvUxDn5gaEhwPP3ut2dTv7IiABO67tKHMfJYecCzzsXKbbuqdSqYw6Ri2nWhjWfBgAYM35NYqtraYq0BQo3ron3Ff/PjiqHRGXEYeb6TcVPfb+m/uRlJ0EX1df3N/4fhRoCvDa7tcUfQxTZd0LpXw8zLPzHmC+9j3d43H7HmOMMcaYeVg1lMoqyEJEYgQiEiMA0E5kEYkRxdUb83bPw1Mbnipzv5WnV6JbcDe08WtjyeXaDDF8urqEUgCQkaHccSsj5klV1LoniFDq+HEKdZT21TGqkhrXZhzq1KokHTORmCfVrRttyT5hAp0s5cQ1A+I7Y3tU2RY+8bxSYsi5IIad20soJUlAdLR5NhMwZci5ruIWvkhu4TPVnut7kJqXCj93v+LKJqXUcqqFzkGdAVCIpCRR3TWy1Uh8PuhzqKDCH5F/4FDMIUUfxxTZeYUAgNpmDKXM1b4nQilHR3ozBFdKMcYYY4zpx6qh1In4Ewj/Jhzh34QDAGbtnIXwb8Kx4N8FAICErIQy7UXpeen4M/JPs1aX2Lrq0r7n7Cz/4W6pFr6qhpwLwcFAWBiFEv/8o+wakrOTi08mzTngHJDnSfXsSZe+vsDo0fc+eOpZ7L6+G4WawhL3Ec8rc1RK2Uv73ldfAU2aAD/9pPyxj8UbP+Rc10OhD8HdyR0x6THFQRczzupzqwEAo1uNrrSV9tw5IDXV8OOLuVJKDjsv0hbhz4t/AqCAsp1/u+L/F2ftnAWtpFXssUyRk1cEAPB1N1+Vsrna94zdeQ+Q/2/jSinGGGOs+tu/Hxg2jF7UV6mAjRtLflySgAULaKSJmxswYABw9WrJ29y9SwUEXl6Ajw8weTJ1FOk6exbo3Zv+zmjQAPj447Jr+eMPoGVLuk3btsDWrYavxdKsGkr1a9QP0ptSmbdVw1cBAFYNX4W9z+wtcR9vV2/k/C8Hz3V6zvILthHWaN8TOxApGUoB9EMHWCaU0mrl9r2qKqUA87Xwbby0EYXaQnQO6oxOQZ2UPXgppUMpQG7hw7kJyMjQ4L9b/5W4D7fvAX/9RZc7dyp73KyCLETeprl2plZKuTm54ZEWjwDgXfhMsfrs6uJQakK7CRXe7tgxmsn29NOGP0bxXCkFh53vu7EPt3Nuo45bHfRv1B8A8O7978LdyR3Hbh2zibbOrIIsFBVQi2pdM7bOm7t9z5hQSrTvWatS6pVXgCFDaIMJxhhjjJlXdjb9nbh0afkf//hj4IsvgOXLadMtd3dg0KCSfydMmABcuEDnnps3U9A1ZYr88YwMYOBAoGFD4ORJYNEi4K23gBUr5NscPgw8/jgFWqdPA8OH09v584atxdLscqZUTWfN9j1DdyCqiiWHnV+9SlUOrq60w15VRCi1c6eybVw7omiO0yPNH1HuoOVISgKuXaO0vnt3+f19+wKhoQAKPIDIx8rswicqpWpq+55WC5w4QdeV3hfhVMIpaCUtGng1QKBnoMnHEy18f0T+YTOVMfbkSOwRTP6bqote7fkqejToUeFtt26VKye1Bn6pezboCRVUuJJyBUlZymxBqdu65+TgBAAI8AjAa71optRre15DbqHCKY2BEjITAA1tRuLpZn+DzpWolLLGH3hFRcDnnwPbtlF1H2OMMcbM66GHgPfeA0aMKPsxSQIWLwbmzwcefZTOQ3/6ic65REXVxYvA9u3Ad99RR0+vXsCXXwK//y6fm61eDRQUAN9/D7RuDYwbB7z4IvDZZ/JjLVkCDB4MzJ0LtGoFvPsu0LEjdYHouxZr4FDKDlWX9j3AsqHUf/cKgjp1ovlKVenbl2534wYQFaXMGgo1hdh9fTcAYHDoYGUOWgFRJdWmDZWACioVpecAgFPPlgmlzFkpZQ/te9euyc/Hy5cNDyAqozvkXA
mDQwfD09kTsRmx+C/uv6rvwIrdSLuB4WuGI1+Tj+Eth+ODBz6o9Pbi5yk7G7hu4AZ3vm6+aOvfFgBwIMb0Fr7SrXu6ZnWfhWDPYMSkx2DJf0tMfixTJGTJoZSLi3FD/fVhrplSokLYlEopa7TvxcbKFVKWfPGKMcYYq24yMzORkZFR/JZvxH/s0dFAYiK1yQne3hQ+HTlC/z5yhM7XOneWbzNgAO0KL85hjxwB+vShETjCoEF0viLGSxw5UvJxxG3E4+izFmvgUMoOibDg9m3L/cFbHUIpfYecCx4eQI97hRNKtfAdjTuKjPwM1K1V1yqte8LTTwMODhIQ2xMnz+QgOTu5+GPmGHQuQqn4eMvttGgsUSUFUKXETQU3TFM6lHJ1dMWjLR8FwC18hsjIz8Cw34YhOTsZHQI64OcRP0Otqvi/w6Ii+fcHAEREGP6YSs6V+jf6X6TkpqBerXro16hfiY/VcqqFhQ8sBAB8cOCDEj/blqZbKaX7B5TSuFKqJN0XUezhhQDGGGPMVoWFhcHb27v4beHChQYfIzGRLv39S77f31/+WGIi4OdX8uOOjkDt2iVvU94xdB+jotvofryqtVgDh1J2qHZt+VXYhATLPGZ1CqWqGnKuS+m5UqIq6cEmD1Z6EqyEykKpwEBg6NB7lQunJ2NXFH2COTlAWhq9W8lKKX9/+sWq0VjuOWus48dL/vviReWOrdTOe7rGhNXMFr4ibRHS8tIMvp9Gq8H4P8fjfPJ5BHgEYNPjm+Dh7FHpfc6dKzlo8swZgx9W0blSIoAc1WoUHNVlt4Wb0G4COgV2QmZBJt78902TH89Y8ZnxgJbKUs0ZSpl7ppQxbeu6g84tvTmmbijFlVKMMcaY8SIjI5Genl78Nm/ePGsvqVriUMoOqVRyFYulWvhsLZT6/nsa/CbWVZWcHNqtANC/UgqQQ6k9e5QZGCvmSZm7dS8nBzh1iq6XF0oBOgPPzzyFLZeopVA8n9zd5SH0SnBwkJ+ztv7KvQilRPCrVCiVnJ2Mm+k3oYJK0Sq5gU0HwsvFC7cyb+Fw7GHFjmurJEnC35f/RvMvm6Peonp4ZdcryMzP1Pv+c3fNxZarW+Dq6Iq/x/2N+l71q7zPwYMl/21MKCUqpc4knkF6nvEpfKGmEOsvrQdQtnVPUKvU+HTgpwCAFadW4ELyBaMfzxS67XuWqJRSuipJiUHnAM1/sCQOpRhjjDFleHp6wsvLq/jNRfc/eD0FBNBlUqmxoklJ8scCAoDkUsXtRUW0I5/ubco7hu5jVHQb3Y9XtRZr4FDKTll6Bz5z7b5nbCj16qvAt98Cb7yh3+1PnqQqncBAeei2Pjp1Anx9abeD0hU0hkrOTsbJhJMAKEgwp+PHgcJCCoIaNSr/NoMHA3X984GcetiySQ2tpC0x5Fyl8AgYe9iBr6hIDvMepa44xUKp47foCdSqXit4uSiX+Lk4umB4y+EAqn8L39WUq3j414fx6O+PIjotGkXaIiw6vAgtl7bEb+d+g1RFScq3J7/F50c/BwD8NPwndAnWr2JNVB0OvPdja0z7XqBnIEJrh0KChEOxhww/wD3/RP+Du7l34efuV1x9VZ6+jfpieMvh0EpazN011+jHM4WlQ6ncXGWrkpRo3wMs38LHoRRjjDFmOxo3psBnzx75fRkZNCtKbEbVvTt1q5w8Kd9GbK4juny6d6cd+QoL5dvs2gW0aEHnq+I2uo8jbiMeR5+1WAOHUnbK0jvw2dLue+npwJ07dH3JEtqqvSpiQNx99xkWtjg4AA88QNdNbeHbGbUTABAeEI4AD/NG0bqtexV9vo6OwLOTqPUn48gYnE44bZYh54I97MAXGUknop6eciil1A58Ss+T0iVa+NZFroNGa+NDu4yQXZCN1/e8jjbL2mDbtW1wdnDG671ex4axG9DUtyniM+Mxfv149PuxH84llb/d2D/R/2D61ukAgHf6vYPHWj+m9+OLn6dp0+gyNpZeuTKUEnOlRP
A4utVoOKgdKr3txwM+hqPaEduubSv+/WNJ8ZnxxaGUPptLGEsEQFptyT/UTGVKKKUbwll62DmHUowxxphlZWXRi5bihcvoaLoeE0PnYv/3f7Q7399/01iIp56i8/nhw+n2rVpRwcBzz9G57aFDwMyZtMOeOO8fP57+vpg8GbhwAVizhs6FZ82S1/HSS7SL36efApcuAW+9RfNyZ86kj+uzFmvgUMpOWbpSypba96Kj5etaLf1gVtUeYeiQc12ihW+nied0Yp7UoKaDTDuQHiqbJ6XruWfvndRGPYjfDx41ayhlDzvwiWq4Tp1o10KAKqWUqL44Fn8vlApSPpR6sOmD8HbxRkJWgklVOLZGkiT8ceEPtFzaEgsPLkSBpgAPhT6E89PO4/0H3sfwlsNxfvp5vH//+3BzdMP+m/sR/k04Xtr2Uol5U1dSrmDU2lEo0hZhfNvxmN9nvt5riImh56yDA/0uaNyY3m+NuVIFmgJsuLQBQMWte7qa1WmGGV1mAADm7Jxj8cDS0oPOAWWrkkwJpVQquYXPkpVSksShFGOMMWZpJ04A4eH0BlBQFB4OLFhA/37lFeCFF2j8TJcuFGJt316ysnr1aqBlSyqIGDIE6NULWLFC/ri3N52PRkfTucrs2XT8KVPk2/ToAfz6K92vfXtg3Tpg40b5vEbftVgah1J2SoQGlpgppdHIr/TaQigl/uBu0QKoWxc4fx746KPK7yMqpQwZci6IUOroUSpvNIZW0hZXKph7npRWCxy+N1qoqlCqSROgRZc4AGqs+cW9RPue0uyhfU+EUl26AM2b0zasaWll+64NJUmSPORcz5YxQzg7OGNEqxEAqk8LX+TtSAz4eQDGrBuDuIw4NPZpjL/G/YUt47egWZ1mxbdzdXTF671fx6WZlzA6bDQ0kgZfHPsCzb9sjh9O/4CUnBQM/XUo0vLScF/9+7DykZVQGVAuKQLe8HD6/de+Pf3blLlSx28dR26h4VO591zfg9S8VAR4BKBXSC+97rOg7wL4uvriXPI5/BDxg8GPaQpLte/pjndQcti5KaEUUHLYuaXcvl1yKH9amv6zFxljjDFmnH796IWh0m+rVtHHVSrgnXdoh7u8PGD3bjrX0FW7NgVKmZl0bvz997QbvK527YADB+gYcXE00qa0xx4DLl+mvz/On6eAS5c+a7G0stv2MLtgyfY93T9ozRVKGRL2XL9Olx07AkOHAhMmAO++C4waBYSFlb19XBy9qdVA586Gr7FxYyA0FLh2Ddi7F3jkEcOPcTrhNG7n3Iansye6NzBvw25kJJ2IuLsDHTpUffvpU5zx0nEgdt8ARDkUAnCyWPtecnYytl/bjsfbPA4nBzP29+jpxAm67NKFTigbN6YQ9OJF04b/XU+9jru5d+Hs4Ix2/u2UWWwpY8LGYFXEKqyLXIclg5dU2dplDeeTz+Pvy39XOfspNiMWK0+vRJG2CK6OrpjXax7m9pgLN6eK04EQ7xD88dgf2H19N17Y9gIu3bmESX9PgqezJzILMhHiHYKNYzfC1dGwl4FKVx22b0+vOBkTSjXxbYIgzyDEZ8bjv1v/oV+jfgbdf82FNQD0a90TarvVxht93sCsnbMw/5/5GNt6LDxdPA1dusFyC3OpWs0CoZRKRT+veXm2FUpZo1Lq2jW6DAkBUlLo/+9bt6z/xyZjjDHGWEU4lLJTlmzfE6GU+MNfScZUSolQqmlT4PHHKVHesoV2kztwgNpsdIkqqbZtjQ/VHnyQ/tjfscO4UEq07j3Q5AE4O5jx7AzySXS3bjQ3qirPP+mHl19KgzajPrZvo/Yec1ZK6bbvvbb7NfwQ8QOiU6PxZj/rbV0P0KsJYofGLveKmVq1kkOp/v2NP/bxeCrBCg8IN9v3/4EmD8DX1RdJ2Uk4EHPA4MDD3E7Gn0TvH3ojt0j/1GB4y+H4fNDnaOTTSO/7DGgyAGemnsEX/32Bt/e9jcyCTHg4e2Dz45vh7+Fv8LpLh1Ii6D
Vm2LlKpULvkN5Yc2ENDtw07HuUX5SPjZc2AtCvdU/XjK4z8PWJr3Ht7jUsP7Ecc3uaf/D5ueR7s70sEEoBFBzl5SkbAIkNPkytlLJkKCUqiZs2pXVfvsyhFGOMMcZsG7fv2Snd9j0ldxsqj+48KaV3ZDMllGrShNazbBkNpj5yBPj667K31x1ybixR9vjTT8YFgTuidgCwrXlSgosL0G5gBACgIJ8SPXPOlEpIoF3uAOBI3BEAwPKTy1GgsfC+6aWcOUNDkuvUARo2pPe1akWXpu7AZ84h54KzgzNGthoJwPZa+G5l3MIjvz+C3KJcdA7qjOc6Plfp29ROU7HjiR3YMHaDQYGU4OzgjDk95uDyzMtY0GcBdj+5G2392xp8nMxMOajUrZQCqCKxqll25TF2rtSu67uQnp+OQI9A9AzR84f7HmcHZ7zW8zUAwLITyywyW2r5ieUAACfQ7hiWCKUA81RKGbvBhzXa93RDKUvPnmSMMcYYMwZXStkpUcmSk0OBjo+P+R5LvFqs9M57gOmhFEBhx0cfAdOnA/PmUSWTCBUA04acC0OG0DaZR47QjgV//KH/fdPz0nE4loY8WSKUOniQLvUNpQBgyrMOmL5R/rc5Qik/P9qBq7CQwtS6gTm4knIFAJCYlYj1F9djXJtxyj+wnnTnSYnw1Z5CKYAqaFaeXol1kevwxUNfwFFd8a94SQJOnwZaty45k0dp2QXZeOT3RxCfGY/W9Vpj95O74e3qbb4H1BHkGYS3+79t9P2PHqUZbY0ayb9zGzUCvLyo5fjSJertN4SYK3Uk9ggKNYV6t62KoPGxsMegVhn+etLjbR/H3F1zEZ0WjW3XtmFo86EGH0NfKTkp+O38bwAAtZaSGXsOpeypfU83lBJhGIdSjDHGGLNlXCllp9zcAF9fum7uPzjNtfMeIIdSmZl08lcVjQa4cYOui1AKAJ5/Hujdm9b6/PNy9VhRkTwnyJgh54JaDSxfTq2B69bRDgX62hO9BxpJgxZ1WqCxb2PjF6GHhATakUGtphBNX08P7ARVg6PF/w4MVH5tarUcdsXF0XwhrSR/07869pXyD2oA3VBKEDPKIiONP26hphCnEk4BMH8o1b9Rf9Rxq4PbObex78a+Sm/7yy+0c8drr5lvPVpJi6c2PoVTCadQt1ZdbHp8k8UCKSWUV3WoUpk27Ly1X2v4uvoiuzAbpxNP63WfvKI8/HX5LwCGt+4JtZxqYVL4JADm/1n7/vT3yCvKQ3hAOIoK6c8MJzOPjBNVSbYUSlm7fY8rpRhjjDFmDziUsmOW2oHPEqGUJFEwVZW4OAqanJ1Lzj1Sq4Fvv6VXpnfsoBNuADh3jk4svL1ptz5TtGsHvPQSXZ8xQ/+THzFPyty77gHySXTbtlTNoa9aTrXQaiBVc3n45pitqkF3B76IxAgAQHv/9nBUO+JQ7KHi91lDeaFUy5Z0mZBgWDWfrgu3LyC3KBfeLt4IrR1q2iKr4OTgpHcL329UyILVqynsNYc3/nkD6y+uh7ODMzaO3Wj2UFZpFbXCirlSxoRSapW6eOe8AzcP6HWfnVE7kZGfgWDPYJM2SpjWeRpUUGFH1A5cTblq9HEqo9Fq8PUJ6qOe3ukFaDRUdmipSiklAyClKqWs3b6nO8ePMcYYY8zWcChlxyy1A585QylXV/lkRZ+TfvEHd6NGZQeat2gBvHlvVvb//R+QnCy37nXtSsGVqd56i/7Qv34dWLiw6ttLkmTT86R0PfWEI9B+FRo8+p2yi9KhuwOfCKAGNR2EUa1GAQCWHltqtseuTGam3KKnG0p5e8s/Z8a28InWvS7BXYxquzKUqKT58+KfKNQUlnubnBzg33/p+u3bwOHDyq/j5zM/44ODHwAAvhv2ncFzkKytqIjadYGyP0+iUsqYYeeAPFdq3cV1tENdFUxt3ROa1m6KIc1oQN7Xx8sZwKeAbde24UbaDfi6+mJki7HF76+J7XuWrpTKzKT/9wCulGKMMc
aY/eBQyo5Z6g9Oc4ZSgGFzpXR33ivPnDlUxXD3LvDii8oMOdfl6Ql88QVd//BD2tmoMpfuXEJMegxcHFzQt1FfZRZRCWPmSQmDWvYBRkxEQkvz7YKnuwOfCKU6BHTAzK4zAQCrz61Gam6q2R6/IqdPU7Ve/fpAQEDJj5k6V+qPSBpA1rOBZUKZfo36oW6tukjJTcHeG3vLvc2//5Y8Ud64Udk1HIw5iGc3PQsAeL3X63iy/ZPKPoAFnD1Lv/u8vGjuli7d9j1jNpoY0mwIHNWOOBp3FM2/bI7vT39fopVVV25hbnHr3tg2Y8u9jSFmdJkBAPgh4gdkF2SbfLzSRGvg5PDJcJTkQYTmDqVsuX3PUpVS4v/HOnXo/1XxIgCHUowxxhizZRxK2bHq0L4HGBdK6c6T0uXkBKxcSVVUa9bQ/CdAuVAKAEaMoMHnhYU0XL2yk1LRute3UV/UcjLDpHgd2dkUrgBAr16G3795HdozPC0vDXdz7yq4MpkIpWJiJJxNom3NOgR0QM8GPdHOvx1yi3LxQ8QPZnnsypTXuieYEkpdunMJu6/vhlqlxsQOE41foAEc1Y7FlWcVtfBt3UqXjRrR5caNyu3iGZ0ajRFrRqBAU4BRrUbh3fvfVebAFiaqDnv0KFuV2bo1ve/OHeN+/4bVC8POJ3aiVd1WuJ1zG5P/noweK3vgRPyJMrfdEbUDWQVZCPEOQbdgEwbj3TModBCa+jZFen46Vp9bbfLxdF1JuYIdUTugggrTukwrsTuhuWdK2XL7nqUqpXRb9wD5b4TERPO16DLGGGOMmYpDKTtmqfY9sfuePYRSANCxIzB7Nl0XgVpXBedLq1TAl1/Sq+D//CPP5inP9igKpSzRunfsGJ141K8PhIQYfv9aTrUQ7ElnMdfuXlN4dUS8cn/tRj6yC7Ph5uiG5nWaQ6VSYWYXqpb6+vjXFVaNmIsIpTp3LvsxU0Kpr49/DdzsCc/vr+LfjQ2rvoNCRAvf+kvry7TwSZIcSn34IT2Pr18Hzp83/XHT89Ix9LehuJNzBx0DO+LH4T9apGXRHCprhXVzk2fUGTNXCgD6N+6PM1PP4JMHP4GHswf+u/Ufun7bFc9veh53cu4U3063dU8ltoU0gVqlxvQu0wFQVZOkVBoJYNnxZQCoEqyJbxMU3nvqOTiUDfaUZo72PfF/n72075UOpfz96euu0QBJSZZZA2OMMcaYoezzbIEBsHz7Xi0zFfqIgdxKhVIAzX4KvTdTOjQUqFvX6OWVq0kT4I036PqsWUBaWtnb5BbmYv/N/QAsO+TcmNY9oWltOpuJuhulwIrKktv36ES4rX9bOKjpbHV82/HwdvFGVGoUdlzbYZbHr4g+lVKG7sCXmZ+JVRGrgOPTkX6zCSZOBP73P/12mTRVn4Z94Ofuh7u5d/FP9D8lPnbxIu1g6eICDBsGPPggvd/UFr4ibRHG/TkOkbcjEeQZhL/H/Q13ZzMl2RZQ1c+TKcPOBScHJ8zuMRtXZl7BE+2egAQJK06tQPMvm2PZ8WXIKsjC35f/BmD8rnvlmdhhItwc3XAu+RwOxhxU5JhZBVnFVY6iHVdUSpm7dQ8wb/uesf/3WXrQeelQysFBbkfmFj7GGGOM2SoOpewYt+9VzM0NWLUK8K2txdNPK1cJoGvOHNqdLSmJwobS9t3ch7yiPDTwaoBWdVuZZQ26TJknJYT6UpJnrkopEUql3nEBipzQwb9D8cfcnd3lLeuPm2/L+tzCXOQU5hT/OyVFfl6VVykVFkaX0dGGnfD+fPZnZOZnwjHu/uL3ffABMGGC+SsnKmvhE1VS/fvTyfbw4fRvU0Op2TtmY/u17XBzdMPf4/5GsFewaQe0opgYmnvm4FBxlaWpw851BXoG4ucRP2P/M/vRzr8dUvNSMX3rdLT4qgWyC7PR0LshugSVk5gaydfNF0+0ewKAcj9rq8+uRnp+OkJrh2Jg04EALBtK2WL7nrUrpQDegY8xxhhjto9DKT
sm2vcSE2mnKHOxlVAqPZ0CBABorMfO8jc8VyPzJVecbDYSRVrlv0DOzsDX9zawWrZMrrYRxDypwaGDFWm7qYxGI+8UZsw8KSG09r1QKtU8oVTduvdOUCU1kBmE9gHtS3xctBVtu7pN8Wqt/KJ8fHjwQ9RdVBfh34QjqyALAHDi3hif0FDA17fs/fz86P2SBFy5ot9jSZKEpceXAqlNUJQWACcneq44OgK//w4MGEDziMxpbGsair3h0gYUaOThPlu20OUQ2oQNw4bRzpSnTgE3bxr3WH9d+gtfHKMdAH4Z+Qs6BXUyet22QFRJhYdX/HtPd9i5Uno37I2TU07iy4e+hI+rD+Iz6RWHMa3HKP47RAw8X39xffHjGKv4+Q5geufpxS2bIpQy9zwpwLZ337NWpRTAO/AxxhhjzPZxKGXH/PzolXyt1rzzImwllBLVLH5+tAteZQ7FHMKkvyehSCrExksbMWfnHNMXWo7+/YEnnqDAYurUksNkRShliXlSFy4AGRmAhwfQtq3xxykOpcxUKaVWy3OlkFEfHQI6lHn8waGDIUHCshPLFHvc7de2o+2ytpi3Zx5yCnNwJeUKPjn8CYDKW/cAmiFm6FypvTf2IvJ2JFziBhUfe9o0YMcOer4fOgR07w5cvWrKZ1W5XiG9EOARgNS8VOy5vgcA/YyJirqHH6bLevXkIPOvv4x7rE+PfAoAmN19Nka2GmnKsm2CPq2won3vyhX5d6QSHNWOmNl1Ji7PvIznOj6HrsFdiwMkJbUPaI9eIb1QpC3CipMrTDrWgZgDOJd8DrWcauGZDs8Uv9+e2/ckyb4GnRcWUoUfUDKU4h34GGOMMWbrOJSyYw4OQGAgXTdnC5+thVJVte5Fp0Zj+JrhKNAUoFMgVWws+W8Jlp9YbuJKy/fJJ4CPD1WaiMqpG2k3cDnlMhxUDnigyQNmeVxd4iT6vvuoGsdY5g6lAMA/8N6ZakYI2vqVTdDEwPOVp1eWaLMzxo20GxixZgQeWv0Qrt69Cn93f0zrPA0AsOjwItzKuFVcKVVRKAUYHkqJlqhG6U8CAPr2pffffz9w+DDtenftGn2/Dioz0qcMB7UDRrcaDQBYG0ktfLt3U1VlixYlf45MaeE7m3QWB2IOwEHlgJfve9m0RdsIfUIpf396kyRlhsSX5ufuhxXDVuC/Z/9DQx/zDMkXP2vfnPymRDWdoUSV1BNtn4Cvm1xuaI32PaVCKd3qJnto37t5k14UcXOT/y4AuFKKMcYYY7aPQyk7Z4k/OG1l9z19QqmM/AwM+21Y8e5f+57Zh/f6vwcAmLl1JnZf363Aikvy9wcWLqTr8+cDCQkoHtTdvUF3+Lj6KP6YpSkxTwqQB50nZycjIz/DxFWVz60O9WDWKewAT5eyJW+DQwejiW8TpOWl4bdzlWxtWIncwly8s+8dtFraChsvbSwOTC7PvIylQ5aiR4MeyCnMwfx/51dZKQUYFkrFpsfir0tUcpR1hUJREUoBNKPq6FF6vLt3gQceqHwHR1OI4dgbLm5AflF+mdY94dFH6XL/frlFVl9Lj1EgMbLVSLueIyVkZgJnz9L1qn6elBh2bk0jWo1AgEcAErMSseHiBqOOcSvjFtZfXA8AmNG1ZEWXPc+U0g237KF9T7TuNWlC1Z0Ch1KMMcYYs3UcStk5MVfKnH9wmnv3PaVCqSJtEcatG4cLty+U2P3r9d6v44l2T0AjafDYH4/h8p3Lyi3+nilTaCByRgbtxrc96t48qabm33UPkCs7TJknBQBeLl7wc/cDYL4d+CQv6jHxLSi/z9BB7VBczfTVccO2rJckCX9f/hutv26NN/e+ibyiPPRr1A9npp7BZ4M+g7erN1QqFT4b+BkAYNX+nYiPp7bC8PCKj2tIKPXNyW+gkTS4z3MMbsU6w8EB6NGj5G38/YG9e4ERI+jEffx4GoJuwKeql54hPRHoEYj0/HTsuLYL27bR+0XrntCkCdCuHVVaiOBKH2l5af
jl3C8A5B3X7N3Ro9QS3aiR/Pu1IkoOO7cGZwdnPN/peQDGDzxfcXIFirRF6B3SG+3825X4mD1XSonjODgYPxPLku175c2TAjiUYowxxpjt41DKzlniD057ad+bs3MOtl3bVmb3L5VKhe+GfYceDXogLS8NQ38bipQcA8tBqqBWA8uX0+XvvwNbV7UBJGBQqPnnSd26Ra0bajXQrZvpx2vqS2c15mrhy3SlZMcpu+KSt0nhk+Dq6IqIxAgcjj2s13GvplzFw78+jEd/fxTRadEI9gzG76N+xz9P/YPWfq1L3LZb/W54vM3jQDxttxcWJlX6/Bah1OXLlW8qkF+UXzyfp2vhbABAx47lz0CrVQv44w9gNt0M//sfMFPhXEetUuOxsMcAAN9sPobERJo7Vl54aUwL36qIVcgpzEEbvzboHdLb5PXaAkOqDs0x7NzSpnSaAke1Iw7GHMSZRMM+kQJNAVacoud7eXOvCgvp0h5nSokKYWOrpADrVEpVFErFxSkfejPGGGOMKYFDKTsn/uCs6TOllp9YjiX/LQEA/DTipzK7f7k4umDD2A1o5NMI1+5ew6i1o0yaoVKe8HDgzTfpesH2d+G661u09+uo6GOUR1RJtW9f9QB4fZh7rlSSAw1xKrzrX+FtarvVxvg24wHI82oqkl2Qjf/t+R/aLGuDbde2wUnthNd6voZLMy9hbJuxFe5atvCBhXBI6A4A8G8eW+ljNGxIJ6eFhfLzsDzrItfhds5t1Peqj6yrFHjptu6V5uBAM8m+/ppCxa+/lk8ulSJa+PbsoGRgwAC5gkPXiBF0uX27fEJeGa2kLf7ezOwy0+w7TFqKPvOkBN32Pa3WbEsyqyDPoOLh9FX9rJW2/uJ6JGYlItAjECNajSjz8erQvmdKhbAtVUplZ1MlL2OMMcaYreFQys5Zsn3PmqFUURFw4wZdL/1H9+7ruzFzK5WYvNf/PYwOG13uMfzc/bDp8U3wdPbEvpv7MH3LdINaw/SxYAHwwLTNALTIO/wsxo5RK7pFeXmUmicliFAqKlX59r3cwlzcUh0FAKTfrjxBE/Np1kWuQ2JWYpmPS5KEPy78gZZLW+KDgx+gQFOAgU0H4ty0c1g4YCE8nD0qPX5Dn4YIyaKT8XOO36NQU1jhbdVqoGVLul5ZC59ogXq+0/M4sJ9+vVYWSgnTpslVbof1KwzTW/cG3RHsGYz8SzRwv3TrntC+PYVvubnArl1VH3dX1C5cu3sNXi5emNBugoIrtp6iImrfA/T7eWrenIKH7OzKw0pbJwae/3L2F6Tmpup9v6+Oyc93Z4eyyZMIpYxtfzOEudr3lKiUsmQoFRpa8v3u7rQRB8AtfIwxxhizTRxK2bma0r4XF0cnjM7OJee8XL5zGY/98Rg0kgZPtHsCr/d+vdLHauPXBr+P/h1qlRorT6/EZ0c+U+AzKOlu+wXAY2Ph6KTBhg3Agw8aPjzaEErNkxLMWSl14fYFaD1vAgBuJztU2tbSMbAjejTogUJtIb49+W2Jj0XejsSDPz+IMevGIC4jDg29G2LD2A3YPmE7WtRtoddaJAlIi6bPNdl7M745+U2lt69qrtTJ+JM4GncUTmonPBL0PK5epYHD+n5fxNwppUMptUqNh4OeAeIo9XroofJvp1IZ1sInAriJHSZWGQDai7Nn6fedtzfQunXVt3d0BNreG41mzy18vUJ6oa1fW+QW5WJVxCq97hORGIFDsYfgqHbElE5Tyr2NJSullG7fUzKUMnf7niRVXCkF8FwpxhhjjNk2DqXsXHVq38vIqLgFRlQhNG5MVSsAkJKTgqG/DUVaXhp6NOiBb4d9q1cL0ZBmQ4oHXc/dNRebLm8y9VMolpSVhNOJp4HW67BuUwZ8fCg06tlTrvRSUmqqfDKsdKWUOUKpiMQIoNYdqJ3oLK2qkyQxp2b5yeUo1BQiIz8Dc3bOQfvl7bEneg9cHFzwZt83cXHGRQxvOdygFrLoaCD1rhqOThrA/y
ze2vsW0vLSKrx9VaGUaH0a03oMLp6sB4Cqj0SVQlXMFUoBQEDS0wDUUAeeQW2/is/aRSi1aVPls7OiU6Ox5QpNRJ/eZbpyC7UyEfB2706tlfqw92HnAM3dE4Pqlx5fCq1UdS+i2HVxVKtRCPQMLPc21aF9z5RQylLtewkJtF4HB6p2LI1DKcYYY4zZMg6l7JyoGkpPl8MjJUmSPF/G3LvvSRKQlVX+bUrPkyrQFGDU2lG4dvcaGvk0woaxG+Dq6Kr3Y77Y7UU83+l5SJDw+J+PGzzgtyI7o3YCoCqfRwf54uBBoEEDGpDdvTtw+rQiDwONBvjuO6rm0GgorKtfX5lji1DqVuYt5BTqMVzIABGJEYAK8KpHw03i4iq//eiw0fB390d8Zjxe2PYCWnzVAp8e+RRF2iI80uIRRM6IxFv93oKbk+FnjseP02WHDmqEBTZDSm4K3t//foW3ryyUSslJwa/nfgVAQdq+ffR+fVr3hO403grnzik/++Xqf/Q91YZuxo6oHRXerlcvoHZtquwTAU15lp1YBgkSBjYdiOZ1miu7WCsyZJ6UUB2GnQPAhLYT4O3ijajUKOy4VvFzBABSc1Ox+txqAJXvulgddt+zh/Y9USUVElJ+qySHUowxxhizZY7WXgAzjZcX7aaVlUV/cDZX+PywoIBCD8B8lVJubtQGU1QEnLsZg7nHxiE1r+Rck9t//x+A53EkezVaLX0P2QXZiM2IhaezJzY9vgl+7n4GPaZKpcKXD32Ja3evYU/0HvRZ1QdBnlXs/66H5OxkAMDgpoMBUGh05AgwZAi1BvXpA6xbBwwyclM+SaJB1K+8Apw/T+9r3Bj48UeTl16stltt+Lr6IjUvFddTr6ONXxvFjn0mic7cA4M0SIsHYiufLw5nB2c81/E5vHfgveL2utDaoVgyeAmGNBti0lpEKNW1iwpDH/wEQ34dgi+OfYFpXaahiW/Zafq6oZQkUbubsPL0SuRr8tExsCPuq38fnttP7+/TR//1BAbS9zI6GvjvP2r7VEJREbB9+73FNtuKtRcuYHjL4eXe1tERGDaMnk8bN5YfquUW5mLl6ZUA5FlE1YUxoZQYdm7PlVIA4O7sjokdJmLxf4vxxIYnKv2dml2QjdyiXLTzb4eeDSr+YtX09j1RKWXu9r3KWveAkjvwMcYYY4zZGg6lqoHgYKrEiY9XPpTSrb4yVyilUlG1VEoK8M3h33Ak8UjZG92icqo015NIu3MJAOCodsTvo383OjRxcnDCH4/9gZ7f98TFOxeRka9MeYoKquLdrAD6/uzfD4waBezZAwwdSlVOTz9t2HFPnwbmzqVjAICvLzB/PjBjRvm7qZkitHYojscfx7W71xQLpbSStrgiLbSxCy6eqDqUAoCpnafiq+NfoUBTgPm952NW91lwcTT9ExahVOfOwODQwXiwyYPYdX0XXtv9GtY+trbM7UNDKbTJyqKTuwYN6P0arQbLTiwDQFVSKSkqXLhAHzMklAKohS86mlr4lAql/vuP2jy9fIqQUf8o/r58BrmFuRVWlw0fTqHUhg3AZ5+VDN8A4Pfzv+Nu7l009G5ocjBoS2Ji6Pvq4AB07ar//dq1o8vYWODuXao0s1czus7AshPLcDf3Lu7m3q3y9rO7z660Zbamt+9ZulKqqlCKK6UYY4wxZos4lKoGgoIolDLHH5wilHJyMu8OSiKU2nb+MFAX+HTgp+gU2Kn441PXh+MSgHdHTULvgY8CoOAk2CvYpMf1dfPF6edP49itY3rNUdFHgEdAmWHb3t7A1q3ApEnA6tXAM89QxVS7dlSB06oV7e5WXvAXG0vh088/U4WOszPwwgvA//5HwZQ5NK3dtDiUUkp0ajQyCzLh4uCCsKZe2AT9XrkP9grGpRmX4OzgDF83ZT5hjQY4eZKud+lClXOfDvwUHb7pgD8i/8ChmEPoGVKyAsTZmYKpS5eoWkqEUluvbsWNtBuo7VYbj7d5HNvujShr3RqoW9
ewdfXoQc+PylrnDLWFRj/h4YcccNi3AW6m38S2a9tKBKe6Bg6kE/GbN6klTVQCAbTj4ZfHvgRAs6Qc1HoOXrID4mseHm5YAO/tLVe4nT0L9OtnluVZRGjtUFyaeQk3025WeVsvFy90COhQ6W0K721oaY/te6Jt3R4GnVcVSonWbg6lGGOMMWaLOJSqBsz5Kqi5h5wLYq7UndQCeAV7YUaXGSWqYZLvhRePdm+Dto2UfWwXRxf0bthb2YOWw9kZ+OknOkH46CNg82Z60xUSQgFVWBhdXr8OLF4sv9I+bhzwwQd0EmxOob7KDzuPSIwAQDsgNtRQmKFPpRQA+Hv4K7YOgIKl7Gx6Xou2vLb+bTGpwyR8d/o7zNo5C0cmH4FaVXLsXqtWcig1cCC9Tww4nxw+GW5Obth/r3XPkHlSgmgbO3qUgjN9h21XZutWuhwyRIX6/mOw6PAirL2wtsJQqlYtai/duJHedEOpo3FHcTrxNFwdXTE5fLLpi7MhxrTuCe3bUygVEWHfoRQANPJphEY+jRQ5ljUqpUTLuak/O/Y06JwrpRhjjDFmz3jQeTVgzh34LB1KIc8bw1sOLxFIpaVRWwxg/jDG3NRq4MMPKXRYsgSYOpXCi3q0WRtiYoAdO4DPPwemTKHb5uVRG9ixY8Bvv1nmayCGnUelRil2TBFKdQjoUFxlZK0ZJydO0GXHjiVPXt+9/124O7nj2K1jWHN+TZn7lR52fiXlCnZE7YAKKkzrPA0AioecG9q6BwBt2tCMuMxMFLcAmuLWLap2UqmAwYNpZ0AA2HRlU6VD7MUufBs3lny/CODGtRmHOrXqFL8/IQF4801gwQJg2TK637FjFDqKYMLWHTxIl8aGUoD9DztXmvjem7PKVnDV2edCiRBI/J9jygYftta+l5wsV68xxhhjjNkKrpSqBsQOfOZ4FVS0MJg7lPLylgCogHxvjAl7pMTHoqPp0s+PTtirg27d6E1XSgqFHeItMpJO6l56CXjkkbKzfcxJhFKKVkolRQC4F0rdyxz1rZRSmpgn1aVLyfcHeATgtV6v4Y1/38Bre15DWL2wEtVSHkE+ABrgxJksnEuKxpL/lgAAHm7+MBr7NkZqqhxMGFMp5eAA3HcfsHs3zZUS84qMJaqkunWjVsI6Uic09mmM6LRobL26FaPDRpd7v6FDKUA9c4Z+/ho3BpKykrD2As3a0h1wvn49BagpKRWvo25dGuQeGEhf83feoePbiowM2vUQMC6Uqi7DzpVmjUopgEIgY/7PiowE/viD3kQo7OVl/JpEpVRRkXKVj6Wlp8s/e03K7s8AgH7+nJwokEpIoIpcxhhjjDFbwaFUNWCJ9j1TXi3WR75DMgB/uBb548GmJSc8X79OlxX9wV1d1KkD9OpFb9YmQqmY9BjkF+UrMlhcDDnvENAB9e+dMN6+TSeQulUOllBRKAUAs7rPwjcnv0FMegw6fNOh5AfjOwI4iZNnc9BuuZwYzegyAwC1gEkSbTgQEGDc2nr0kEOpqVONO4Ygt+7RpUqlwpjWY/DRoY+w5sKaCkOpOnWo0mvvXuCvv4D/+z/gu1PfoVBbiG7B3dApqBMyMykw/eEHuk+HDhSoJSTIb4mJdCJ85w69nTsH7NxJLW4DBpj2uSnp6FFAqwUaNZJDfkOISqnISPp8LVEZZA8sGUo5OMjBiyFzpS5ckIOoyEj5/U5O1KL7/PPGr0n391p+vnn+HxVVUn5+gKdn+bdRq+l5ffMm/Z3AoRRjjDHGbIkNvVbNjFUd2vfiC2hHveYeXeDsUPIMpqaEUrbEz90PHs4e0Epa3Ei7YfLxUnJSEJtBZVHt/Nuhdm25ssHSc04KCuSKlvJCqVpOtbDs4WVo6N0Q/u7+Jd78GqbSjXL8UA+t4O/uj1GtRmFgUxowZUrrntCjB10ePmz8MQA6Cd61i64//LD8ftHCt+XKFmQVZFV4f9HCt2EDUKQtKt5hcGbXmTh8mEKoH36gCr5582iXP9
G6999/1Iqal0fB49mz1JY6aBAdc8cO0z43pZkyTwqgMMvLi55bly4ptiy7Z8lQCpBDoKpCqagoajcNC6OW2bffpkDKyYl+VlatApKSaOafKf/vlA6lzKGq1j1B/J1grZZpxhhjjLGKcKVUNSBe2Y+Pp1f7lWyLsUQopdFqcD33NIC+CHEp26/EoZTlqVQqhNYORURiBK7dvVZmN0FDnUmiKqkmvk3g5UL9MA0aAFeuUAtfVSdUSjp3jk6WfX0rfk4NbT4UQ5sPLfdjjVZQxcGf90eid6n5+CKUMqZ1T+jWjYKeqCg6MfY3csb7gQP08xsQUHJYeXhAOJr6NkVUahS2XNmCsW3Glnv/4cOpQurgQeCnw9twK/MW6roE4sKacXj6Q/pd07Ah7QpZ+usgqNXUOlS3LtC2Lc3p2bGD3hYtMu7zMgcxnN7YUEqlomqpAwco8GzbVrGl2TVLh1JubjSPrbIZTv/8Q+2pIrhydqaKqMceozZpHx/l1uPoSD8DWq355krpG0rxDnyMMcYYs1VcKVUNBAbSSZFok1GSJUKpAzEHkK2mv5S9UL/Mx/X9o5spq6kvfcGVmCulO+RcECdJlp4rJVr3Onc2bk5X6WHnQmYmcOoUXTcllPLxAVq3putHjhh/HNG699BDJYNqlUqFsa0piFobubbC+zdsCISH0wn1olUXgTvN4PTDMXz4gSO0WuCpp2jmVEWBVHkefJC+5ufOmaey0xg3bshhothR0Rg87LwsMVTbkqEUUHGl1N69ciDVvTvthpqcDGzaRM9nJQMpwdzDzg2tlOJQijHGGGO2hkOpasDJieZJAMqf6FkilFp7YS3gkg4AyMwoOwmWK6WsQ8lh58WhlH+H4vdZawe+yuZJ6aOiUOrwYRpm3KiR/LkZS4kWvi1b6FK3dU8QLXxbr25FZn5mhccQLXyXNowAvjmNhCv14esLrFkD/Pijzq6ZeqpTh8JAQG4ttLbvvqM5YAMGmBZ8i2o0fUKpK1coLFyyxPjHswe21L63bx/9LOTm0tf+33+BJ580/Dls7JpspX2PQynGGGOM2Rpu36smgoKo1efWrZKtOqbSZ/e9nMIcfHfqO4xqNQrBXsEGHb9IW4Q/L/4JuPYDQDsJlfh4EbVKARxKWVpxKJVqnkopEdxYulLqxAm6VDqUUqJ1T+jRA1ixwvhQ6to1Cj4cHcsfKN7Ovx2a12mOKylXMHXLVDT2aVzucZIC/QG8ANxtBoCOtWqVfIJrjIEDKRjcsQN4+mnjj6OEwkJg5Uq6bspAa0CulIqIoJCroiq8PXuA0aOBtDR6zjzzjPmDEWsRoZSlBr+LSqnSVUkHDtCw/5wcmmu2fr28M565icfhSinGGGOMsfJxKFVNBAcDp08r/wenPrvvvbLrFSw9vhS/nvsVRyYfgcqAnqj9N/cjOTsZHl5aZKFsKBUXR8GUs7Nxu2Ix4ylVKZVflI+LdyjBsXb7XmamvNW70qGUmEukRCglZhudOEEVFoaeQG/bRpe9e5cfeIgWvnf3v4tfz/1a8YEkAAE9gdthmD4vBl++3dzkmXWDBgHvv0+VUkrPwDPU33/TDoEBAcCjj5p2rNataQe4O3do58Hyfl99/TXw4otUUQdQ1c7atcBzz5n22LbKGjOlgJKVUgcPUmVUTg4Fohs3Wna3T3NWSuXny78/OZRijDHGmL3iUKqaMNcOfFW17128fRHLTywHAPx36z+subAG49qM0/v4ay/QTJv+LTpiE8qGUqJ1r3Fj65681kQilLqRdgNF2iI4qo37dRF5OxJF2iL4uvqivpc8M8wa7XubN1MgEBpqfLWPCKViYoCsLMDDg054jx2j9ysRSjVtCtSrRzvXnTpF828MIVr3hgyp+Dazu8+GJEnIyM+o9FhFHX5FI/cwzBkw0agZXKXddx9tXX/nDn1uop3PGpbTry5MmmR6NY+bG9CiBe3iFhFRMpQqKqKh8UuX0r8nTKDbLlhAlWccSimjdPve4c
MUSGVnU5WfpQMpwLyVUjduUFWeu7vcwl8R3d33KqvkY4wxxhizNKuGUvtv7seiw4twMv4kErISsGHsBgxvObzS++QX5eOdfe/gl3O/IDErEYEegVjQdwEmhU+yzKJtlDgBMlelVEWh1Cu7X4FG0sDH1QdpeWl4bfdrGN5yOFwdq/7Lv7h1D8Cj7ftVGkpx657lBXkGwdXRFXlFeYhJj0ETX+O+Cbqte7pVdNZo3/v9d7ocp39uWkadOnJgdOkShSpHj1IrWHAwBaimUqmohe+vv+jE2pBQ6vZtmpcDVB5Kebt649373zVtoUZwcgIeeIACgp07rRdKXbsG7N5NX2ulQqEOHSiUOnNG/tqnpgJjxtBjAcAHHwCvvUYVWm+9Rd/fK1eA5s2VWYMtsValVF4ebRIweDAFx/ffTz9L4uOWZM5B57qte1WFTOJvhPx82gWzTh3l18MYY4wxZgyr1p5kF2SjvX97LB2yVO/7jFk3Bnui92DlIytxeeZl/DbqN7SoY9p29dWBuUrzKwul9lzfg81XNsNR7Yh/n/4XwZ7BuJl+E0uO6je9d++NvbiTcwd1a9XFgDDqpcrIoFdxBRFK8c57lqdWqRXZga+8eVKA3L6XkiLPLjOntDRg+3a6PnasaccKC6NL0cKn27qnVAWCscPOv/uOwoAuXeR12hqxy92OHdZbw7ff0uXgwTScXgmld+C7coUqw3bvphbo9euBefPoORIYSI8N0ND46shaodS+fdQmmpkJ9O9Pu+tV1oJuTuZs3zNkZ1pXV6BuXbrOLXyMMcYYsyVWDaUeavYQ3rv/PYxoNUKv22+/th37buzD1glbMaDJADTyaYTuDbqjZ0hPM6/U9lk6lNJoNZi9czYAYFrnaegQ0AELH1gIAPjg4AdIzk6u8tiidW9Uq1Go40tFexqN/JiA/Ec3V0pZhxJzpSKSIgCUDaV8fOTnlSVOkjZupJPk1q2BNm1MO1bpuVJKDjkXdEMp3aC2MhqN3JI2fbpya1HaoEF0efgwBdGWlp8PfP89XTd1wLku3WHnu3cD3bpRMNWgAXDoEDCi1H91EyfS5Y8/ynOmqhNrhVI//kiBVN++1g2kAPO27xkSSgE8V4oxxhhjtsmupvT8fflvdA7qjI8PfYzgz4LR/MvmmLNzDnILy9n/+Z78/HxkZGQUv2VmVrz9uT0TpfmWmin145kfcSbpDLxdvLGg7wIAwIR2E9ApsBMy8jPw1t63Kj1uoaawuHVvTOsxcHenIcFAyRY+bt+zLlMrpSRJwplEKhspHUqpVJYddr5mDV2aWiUFlAyl8vOpfQ8A+vQx/dhCp07U6paYSLNj9LFlC826ql1bmc/TXJo0obleRUVyq6ElbdhAM62Cg4GHH1buuGLn0ytXqAoqLY0qpY4dK39X1GHDAF9fCgn27FFuHbaisJAuLT1TCqAh/5s3V75zrCVYqn1PHxxKMcYYY8wW2VUodT31Og7GHMT55PPYMHYDFg9ejHWR6zB9a8UlAQsXLoS3t3fxW5it9rOYSPyxeeeOsm0Coq1K95XmrIIszP9nPgDgjT5voG4t6glQq9T4dOCnAIAVJ1cg8nZkhcf9J/of3M29Cz93P/Rp2AcqFeDlRR/jUMp2mFopdTP9JtLz0+Hs4IyWdVuW+biYK3X5stFL1MudO7TbG6B8KHX8OJ1w+vvT8GqluLkBHTvSdX1b+MQg7cmTrTM/xxCiWmrnTuOPYWx10Tff0OWzzwKOCk5W9PenN0mitT3xBIVuAQHl397FBRg/nq6vWqXcOmyFpSulRMjdqxewdSttQmBtolLK2u17AIdSTDkaDT+PGGOMKceuQimtpIVKpcLqkavRNbgrhjQbgs8GfYYfI36ssFpq3rx5SE9PL36LjKw4KLFntWvLf/wqWS1VXqXUokOLkJCVgCa+TTCz68wSt+/bqC+GtxwOjaTB3F1zKzyubuue2NVNbF0vQqm0NBrICigzPJoZztRQSsyTal2vNZ
wdyp6ZiiHXr7xC7U3msn49/REdHq7MQGkRSl27Joddffoov6OVIXOlrlyhgEelAqZNU3Yd5mDqXKlx4yjsOXvWsPtdugTs3Uu7eT77rHGPXZnBg+l78MEHwE8/Vb3bm2jh27CBfudVJyKUMnVnQ33NmkVfxx07bCOQAsxXKaXVGj5zUXcHPsZM8fHHFAKvX2/tlTDGGKsO7CqUCvQMRLBnMLxdvYvf16puK0iQEJdR/l9ZLi4u8PLyKn7z9PS01HItSqWS/+A0Zyh1K+MWFh1eBAD4aMBHcHF0KXOfjwd8DEe1I7Ze3Yrd13eX+XiBpgAbLm0AQK17QulQKjqaLv38bOcEo6YRodT11OvQSlqD7y9CqfYB7cv9+Btv0E5sWVl0Mm/oUG99KbHrnq7gYMDTk4KuH36g9yk5T0owJJRatowuhwyxjxC3f3+qUoqKkis+9HXqFLVj3rkDPPaYYXOpVqygy6FD5coaJa1cCSQlyQPNq9KxI804y8uTW0yrC0tXSnl6AsOHW3eGVGnmGnR+6xYd09ERCAnR7z5cKcWUsoH+hKt2v7MYY4xZh12FUj0b9ER8ZjyyCrKK33cl5QrUKjXqe5nh7MLOiLlSSv7BWTqU+t8//0NuUS56NuiJUa1GlXufZnWaYUaXGQCA2TtnQ6Mt2WOz5/oepOalwt/dH71Dehe/v3QoxTvvWV8D7wZwUjshX5OPWxmGP7GKd97z71Dux2vVAv7+m7ZsF8HUkSMmLLgciYnyIPIxYyq/rb5UKrlaSszDUnKelCBCqbNnaXBzRbKz5XBsxgzl12EOnp5Az3t7VBhaLfXJJ/L1K1eAKVP0GwafmyvvdKfkgHNdDg5AvXr6316lAp55hq5XtxY+S4dStshcg85FkNuwof4tqCKE5VCKmSIvjzZzAKjqVN+NOBhjjLGKWDWUyirIQkRiRPGJa3RqNCISIxCTHgMAmLd7Hp7a8FTx7ce3HY86tepg4l8TEXk7Evtv7sfcXXMxqcMkuDnZ+AAVC1D6VdCtW4GUFLru6wucSjiFn878BAD4bNBnUFVSBrCg7wL4uvribNJZrIpYVeJjayOpdW902Gg4qB2K3186lOKd96zPUe2Ixr5UdmNMC19xKFVqyLmuWrVoh6z+/Sl4GTRIHhyuhHXrqNXlvvuARo2UO64IpQBqn23dWrljC0FBtGatloZlV+TXX+nnpkkTeVaTPTBmrtTNm8Ba+hWCr7+mE/I1a+RKscqsW0ctwSEhtvV1mjCBwqyjR6m9sLrgUMp87XuGzpMCuFKKKeP0aXkTg+Rk88+EZIwxVv1ZNZQ6EX8C4d+EI/ybcADArJ2zEP5NOBb8S7u5JWQlFAdUAODh7IFdT+5CWl4aOq/ojAnrJ2BY82H44qEvrLJ+W6Nk+96hQ8Do0fQK2FNPAUFBEmbvnA0JEsa3HY+uwV0rvX9tt9p4o88bAID5/84vrm4r0BRgw0Wq+x7buuTE6YoqpTiUsi5j50ql5qbiZvpNABW37wkimOrXTw6mKgthDCFa95TejU43lOrTh2YUmUNVLXySJA84nzbNfOswBzFX6p9/5JOcqnz+ObVNDhhAn+9HH9H7X34ZOHGi8vuKAedTpsi7fdqCgABquwSqV7UUh1Lma98zJZRKSTHPboCsZvjvv5L/FpXIjDHGmLGsevrSr1E/SG9KZd5WDV8FAFg1fBX2PrO3xH1a1m2JXU/uQs7/chD7ciw+HfQpV0ndo1T73rlzNG8lN5dOlL77Dth0ZRP23tgLFwcXfHD/B3odZ0bXGQitHYrErER8fOhjAMCuqF1Iz09HoEcgeob0LHF7DqVsU1NfOusxNJQ6m0QTqBv5NIKPq0+Vt3d3py3c+/ShGUEDB9LOdqaIjaWAVaWi2UNKKh1KmYsIpSoaBH/kCHDmDJ38TppkvnWYQ3g4tbplZurXtpmaSr+PAGDuvX0UXn6Z5ggVFND3ODW1/PueP09fQ0dH2/w6iRa+n382fldBW8
OhlPnb9wwJpXx95ZBMydmTrGYRlcy+vnS5d6/VlsIYY6yasKPX1FlVlCjNv36dqlTS0mjeyx9/AFAXFu+kN6v7LDT0aajXsZwdnPHRACpj+OTwJ4jLiCtu3Xss7DGoVSWffhxK2abiSqlUw0IpfVr3SnN3B7ZsAXr3pufBgw9WXf1SGdHm1bu3/POhFN1QyhxDzgURSh05Qm18pYkqqccfpzZCe6JW0/cY0G+u1LJlND+rXTv5fioVzdNq3Bi4cYPCnfJmnIgqqUceAQIDlVi9soYOBerUobBA7OhozyRJrn6ryaGULVVK6W6IwjvwMWOJSqmZ9zZf3reP50oxxhgzDYdS1Yip7XuJiVSdkpAAtG1L7VS1agHLTyzHlZQr8HP3w2u9XjPomCNajkDvkN7ILcrFnJ1zsPHSRgAld90TvLzoMiMDKCqi2TEAh1LWZmz7XkRSBACgvX/lrXuleXjQPLNeveRg6tQpgw5RTOwMpHTrHkDPy969Kbxtb9inaJC2bSmsy8gAIiNLfiwp6V5wDPsZcF6avnOl8vOBL+51as+ZU3JnOx8f+jo4O9Pg/M8+K3nf7GyqQAKAqVMVWbbinJ1pthQgD623ZxqNfKLq5GTdtViTLVVKATxXipkmKYnCf5WKQikXF/qb8ZrhIycZY4yxYhxKVSO67XuGvmqVlkY7n0VFUcXBjh1Ump2am4q39r0FAHin3zvwcvEy6LgqlQqfDaIzxDUX1iAjPwPBnsHo3qB7mdvqVkrFxVEw5eIif17MOnRDKcmAJ5YxlVKCCKZ69qTn5oABhgdTUVHU/qdW03w0pTk4APv3AwcPmnc+kaMj0K0bXS89V+q776gapVs3oFMn863BnETF08mTwJ07Fd/ul1/ohKh+fWDcuLIf79QJWLyYrr/6asl2xzVr5EHwDzyg2NIVJ1r4Nm6suA3RXojWPYArpQBlQ6m7d+n3ImD4iza8Ax8zhaiSCgsD/Pzk/5u4hY8xxpgpOJSqRkR4k5sr/8Gqj9xcamk5cwbw96eKBdHe8v6B93E39y7C6oVhcsfJRq2rc1BnPNHuieJ/l9e6B5QMpUTrXuPG9jW4uTpq5NMIapUaOYU5SMxK1Os+BZoCXEi+AMC4UAoAPD2BbduofS01lSpqYmKqvp8gWvfuv5/+eLZn5Q07LyoCli+n6/ZaJQXQ75p27ShIr6htTasFPvmErv/f/1VceTN1KgVWGg1Vx92+Te8XrXvPP2/bv086dKCvRUGBPKDfXnEoRczRvieqpAIDqYrSEFwpxUwhQikRRonWdR52zhhjzBQ2/Oc5M5SbmzxTRt8WvqIiOnk7cIDa57ZvB0KpMAaFmkIsO0H7rH/y4CdwVDsavbYP7v8Aro701/nYNuX3UumGUuKPbm7dsz5nB2c09KY5Yvq28B2MOYhCbSF8XH2K72sMEUyFh1MVzciRFKLqQ5zUl1dVY2/KC6U2baKKwrp1lR/ibmmiha+iuVJbtgCXLtHvqOeeq/g4KhWwYgXQogWddD/xBFVgHTtGQZaoRLJVKpW8RnvfhU83lOL2PWUrpYxt3QM4lGKmG/K3RQAAQFhJREFUEUPO77uPLvv1o0ueK8UYY8wUHEpVM4bswKfVAs8+Sye3rq502aGD/PHzyeeRU5gDH1cfDAodZNK6Gng3wPYJ2/HziJ9xX/37yr1NeZVSHErZBtHCF5Uapdftvz7+NQBgXOtxUOkO/zGClxewYQMNgT55Epg+veo/fi9eBM6epda3ESNMenibIE4Arl6Vq3/EgPPJk+VqDHulO1eqvO/tokV0OXWqPHuuIp6eNF/KzY2O98gj9P6RI+2jYm7CBHreHjtWdoaYPRGhlJNTyflfNY05K6U4lGKWpNHIO+KKSqn77qOf8bg4+e82xhhjzFAcSlUz+v7BKUm0pfqPP9I8nLVry25rfzye/vroEtSl3HY7Q/Vt1LdEG19pHErZLkOGncdlxBUPtJ/RVZm+soYNqfJJraYKkm
XLKr+9GHA+aJD97UhXHl9foHVrun7kCFUN7dlDJ/u2OrjbED17UoiUkACcP1/yY//9R5WcTk7ASy/pd7y2bYGvKRctrhq1l6+Tnx/w8MN03Z6rpX79lS49Pa27Dmuz1Uop3n2PGeriRSAzk1pGxf9HtWoBXbvSdW7hY4wxZiwOpaoZfXbgi4qiqgGxQ9X33wPDhpW93bFbxwBQKGUJ3L5nu5r60tmPPqHUNye+gUbSoG/Dvmjj10axNQwYAHz0EV1/6aWSg6x1SZJ5d92zFt0WPhHKDR0KNGpktSUpxtVVbgMp3cInZklNmGDYpgfPPANMnEjXW7SQZ5/YA9HC9/PP1GJtb1asoGHzAPD669Zdi7WZY9C5EqFUfDxVSzOmLzFPqkuXkpt76LbwMcYYY8bgUKqaqax9Lz2dqqNataLdnRwcgCVLgKeeKv9YIpTqGtzVPIstRYRSRUX0ihxg3B/dTHn6VkrlF+VjxakVAICZXWcqvo7ZsyloKiqiHfXKC1/PnqVKIhcX4NFHFV+C1YhQatcuuYLGngecl1beXKmoKGD9ero+Z47hx1y6lEKtNWvsq4Xs4YeBevWAxMSK52wZKimJfu/rznoyh99+k6vSXnuNfmZrMltr3wsMpJ+FoiK5FbgqGzcC771nnwEpU07pIeeCCPx5Bz7GGGPG4lCqmimvfU/s0tWsGZ2gFRbSCeDZs8CLL5Z/nKyCLFy4TbunWSqU8vCQTxxzcuiycWOLPDSrgm4oJVUy0OnPi38iOTsZQZ5BeLSF8omQSgWsXAm0aUMn7KNHlz3JFlVSQ4ZUPX/InohQ6tQpICODNiR48EHrrklJIpQ6cED++f/sM6rmeOghuV3EEG5uFIq0b6/cOi3ByYkqwwBlWvi2bqWfmREjqNXm7FnTj1meTZuAJ5+kasXp04EPPjDP49gTpdv3cnPl/9+NCaWcnGiXXUC/uVJpacD48cAbb8i7WLKaSQw5Lx1K9ehBc/BiYoAbNyy+LMYYY9UAh1LVTOlQatcuGl4+bRq9KtqyJZ2gbN8OhIVVfJxTCaeglbSo71UfgZ6BZl83QPOCdEMEf3/Dt7tm5tHEl/oo0/PTkZKbUuHtvjr2FQBgaqepcHIwz5Zb7u40+NzHh+Yr6c4Zqq6tewCFynXqyP+ePp1+ZqqLFi2ABg2oomT/ftpt8Ycf6GNz51p3bdYgWvj+/tv4+T+FhcArr1Dl1Z07FOqeOQN07gwsXKhs5cs//9AukBoNBVNffmlf1WnmonT7XnQ0XXp5lfx9YAhDhp3//LO84+kbbwApFf/6Z9VYVhZwgV6nLBNKubvT7xSAW/gYY4wZpxqd0jBAbt+7fp3mRA0cSH9I1K5NJwlnz1LVQVWO36Ih55aqkhJECx/A86RsiZuTG+p71QdQcQvfqYRTOBJ3BE5qJzzX6Tmzric0FFi9mk56ly+nuWgAcOIEPfdr1aJ5S9WJSiVXS7m5yaFFdaFSlWzh+/prOhnu1EmeWVKTtG8PhIdTJWBYGPD++3IFmT5u3KDNK8TOhS+8QO979FEKq15/nQbMX7pk+lqPHqVdDvPzgeHD6eexOgWmplC6fU+3dc/Y0M+QDVGWL6frLi5AaioFU6zmOXGCqlYbNCh/th/PlWKMMWYK/rOxmhF/bKamAps3U0n1Sy/RVvIzZ1Lpvj6OxVt2yLnAoZTtqmqu1NJjSwEAo8NGI8AjwOzrGTIEePttuj5tGm1VLaqkhg2rnlV2gwfT5cSJtCNfdSNCqc2bga+o6A5z59bcipuffgI6dqQdr+bPp2q5lSupGqkyGzdSoHX0KFUU/vkn8MUXQEgIVRn+9BP9rj12jG63eLHxQ6/FCx3Z2dRO+vvv9P8OI0q27129CrzzDl0PDTX+OPruwHfwIBAZSb9L//iD3vfNN0BEhPGPrQRJoq/FqlW0g3AlHeVMIaJ17777yv84z5VijDFmCg6lqhk/P6qKAqhS5Px5OuEQ79OXpYecCxxK2a5QXz
oLirobVeZjKTkp+PU87QFvjgHnFfnf/6hCo6CAdpT87Td6/7hxFluCRT3/PLVJff65tVdiHg88QBU2165Ru3GjRsCoUdZelfW0aUNh6+rV9LWIjweefZaqqLZsKXsynp9PL0KMGEGzgLp1A06fpp8NQaWi9rrz56mSNi8PePll4P775dYwfV25QkFUWhpV8W3YIIcwjOhWShkbnkgShZHh4VSx4uNj2gB5fSulxAypxx+noH/MGAovX3zRskFQfj7tOrpoEVXi+fsDzZtTOP/MM8C2bZZbS01V0ZBzoWdP2jwnOhqIjbXcuhhjjFUPHEpVM2o1DQo+epSGzrZoYfgxkrOTcSPtBlRQoVNgJ+UXWQkOpWxX09o0VfdaatlKqR8ifkBeUR46BHRA9/rdLbYmtZqqPpo3p1f94+Np1oqoKKpuHByA/v0BZ2drr8Q8fH1pELcwaxZX3ajVNGj60iUa/O7rSy3ZQ4dSkHScOq1x7RoFQ198Qf+eM4f+L2jUqPzj1q9PswWXL6dKmH37gLZtgRUr9AscYmKAAQOA5GSaW7hlS/WsTjSVCOm0WuNmeKWkUDD77LNUjdavH1WnVRQO6KM+dWJXGkrduSNXR4ndFBctotbhAwfkqlRzkCRq4X3lFQo7vLzo8pVXgL/+osDa2RkIuFeQu3Gj+dbC6PtR0ZBzwdOTWq0BbuFjjDFmOA6lqqGwMNP+YBXzpFrWbQlvV+8qbq0s3VDKmJ2FmPlU1L6n0Wrw9fGvAQAzu8yEysK9Vt7edFLi4UH/Hj5crk5g9ke08Pn6ApMmWXcttsTFhSqaoqLo5NzFhVplunalKpaOHWlnxjp1qP1x0aKq27VVKqq+O3sW6N2bQo/nn6eT/VatqFVn0CCqkHnuOarOeecdYMkSCqRiY2nzjJ07qXqHlaX7u8jQFr5duygo3LCBvpcffwzs2UNzfUyhT6XUqlVUgdq5sxw2hIQA8+bR9blz6fliDps20QsLixZRhVRBAVCvHv1uX7QIOHSIdiAVO1Nu3swtfOYUG0u73To60u+ZinALH2OstFdeoQ0zzPX/Bas+avhr0Kw81mrdA7hSypZVFEptu7YN0WnR8HX1xeNtH7fG0tCqFbB+PfDJJ/JJE7NPzz1HLYpTpnDlTXl8fYGPPgJmzKCh0z//TCflAAVLv/4qV8Loq0kTOpFcsoQGoCcn01tVGjak4KRePYM/jRpDt50xP58qSqqSl0e/xxYvpn+3bEnf1/BwZdZUVSil1VLFHEAhpa45c2iQ/Y0bwIcfAu++q8yadIldN/v3B55+mqqkyhvs3q8f/Y5ISKBAtpNlC7trDNG6164dbSJSkb59KTTkSqnq59ln6W+s0FD6faT71rQpt22z8sXE0O8ElYoqu/lvOlYZDqVYGcfjqVLK0kPOATmUcnEBAgMt/vCsEk19qXTtTs4dpOWlwcfVBwCw9DgNOJ8UPgm1nCr5i9XMHnyQ3ph9Cw4G9u+39ipsX0gIDXmeNYsqaMLCgFdfNb7dUa2mSqxnnqG5MOnp8ltGRsl/p6fTH5fz5xsegNU0ajVVORUW6lcpdf48tWueO0f/nj6d/qivLAwwlAil0tPp1evSJwr//kuDxL28ys7nc3MDPv2UWgoXLaK5Tkq+gJSaCmzdSteXLKFKsYq4uNBctA0bKJjlUMo8qmrdE3r1kmcC3rolP8+Yfdu+nWbaAdQuLlrGBbWafge0bEkvEE6ZYtpGDKz6WLuWLvv04d8HrGocSrESJEmyiUqpxo15S3Fb4+niCX93fyRlJyHqbhQ6BXXC1ZSr2H5tO1RQYVrnadZeImM1Tvv2NAhdKb6+1XNnR2tydaVQKj+/8tutWEFDxPPzadOS778HHn5Y+fV4eVG7c1YWhQfNm5f8+PLldPnkk3JbtK4RI2hTgj17qKVzwwbl1rZuHbXrtW1beSAlDBtGj79pE/Dmm8qtg8lEpVRFO+8J3t5UzXfyJFVLjR9v/rWZ240bVBFaU3
eALSykFysAqmJ+6CGab6j7lpFBQeS1axQO//QT7ewaEmLdtTPrq+6bDzFl8Wk/KyE6LRopuSlwdnBGO/92Fn980QbSrJnFH5rpoXQL37ITywAADzV7qHgQOmOMMZlobamsUurAARoonp8PDBlCc77MEUgJ4lXruLiS709MlAeHl27dE1QqqmJycKDb7tql3Lp++YUun3hCv9sPGULrOXmSNrpgyiospK8toN+sUjFXqjq08H3zDb1A+s471l6J9Xz9NQVPdetSRe6IEdRa/OOPFFampdHP3T//0G1btwaSkmhX5Kwsa6+eWdOVK9RW7eAAjB5t7dUwe8ChVDX09+W/8ceFP4y6r6iS6hDQAS6Olm8SHzWKBqjW5D8CbJluKJVdkI3vT38PgAacM8YYK0sMO68olMrOpjY4SaIZSps3A/7+5l1TRTvwff897RLYo0fllUqtWwMz7/3af+klCi9MFRNDrbsqFfC4nuMJ/f3lHTu3bDF9Dayks2fpeevrq9+LhdUllNJqKYQBqE01JcW667GGO3eAt96i6++/X/5mFioVjdro3x+YNo1ab/38gDNnqNJSq7Xkipkt+f13unzwQQo1GasKh1LVzNoLa/Ho749izLoxuJJyxeD7F7fuBVm+dQ+g//Q+/pi2GGe2R4RSUalRWH1uNdLz09HUtykGhQ6y8soYY8w2iVCqova911+nXRXr16cKJEu0CpU37FyjkQecT51a9THeeotONi5eBJYuNX1NotWjTx/DdhgcOpQuxcB/phzRute1q34jFXr3pufv5cs0gN5e7dwJXL9O17Oz6eeyplmwgCqh2rcHJk/W7z4hIVQ96exMl/Pnm3GBzGZJkvz7XN8XGBjjUKoaOX7rOJ7e+HTxv42plhKhVJdgyw85Z7ZPDDu/evdq8YDz6V2mQ63iXyWMMVaeytr39u0DvviCrn/3XckdaM2pvFBqxw7g5k2gdm392i18fICFC+n6m2/qt2NjZcRsNH1b94Rhw+hy1y4gN9e0NbCS9B1yLvj6UogBKL9hRXIycPu2ssesyNdf06WoFvziC9oYoKY4d47aFwHaBdTBQf/7du8uD0ZfuJB2iGU1y9mz1Pbp4gIMH27t1TB7wWeS1URseiwe+f0R5BXlIdCDtq1bG7nWoGMUaYtwKuEUAOsMOWe2T1RKHYk9grNJZ+Hm6IaJHSZaeVWMMWa7KqqUysqitj2AhggPsmDBaXmhlDgJffpp2mVPHxMnAh070rDj//3P+PWcPUsnws7Ohs8fadeOKqtyc2nnQKYcfYec6zJHC19qKrWMhoWZf3bYjRty1d3atfSY6enAV1+Z93FthSQB//d/1Ho3ahTQr5/hx3jiCaoABYBnnwUOH1ZyhczWida9hx+mjTUY0weHUtVAVkEWHvn9ESRmJaKNXxscmXwEjmpHnE06i0t3Lul9nAvJF5BblAsvFy80r9O86juwGkeEUhpJAwB4ot0T8HXjrboYY6wiFc2UevVVIDqaWl4++cSyayodSsXGyifiFQ04L4+Dg1zptXIlcOKEcesRVVIPP1z+7JrKqFRyC9+mTcY9vjnl59POZHv20MyuN98EnnkGGDBA/trZort3aVgxIM/t0ocIMfbuVW4tP/9MM47u3AGmT6fgxFy++YaOP2AA0LKlHLZ+/nnNGN791180uNzFheZpGevdd2kwekEBVcvcvKnYEpkNkyQ5lOJd95ghOJSyc1pJiyc3PImIxAjUq1UPmx7fhIY+DTGw6UAAhrXwFbfuBXXhdixWLl83X9R2q1387xldZlhxNYwxZvvKa98Tu1UBFOZY+tXk0rvvffcdVUb07w+0aGHYsXr2pMoISaKh54YGBlot8OuvdH3CBMPuK+jOlTJnYKGPkyeB8eOpjSkoiKrOmjWjkGPyZNrI5ccfKaR66SXbre46Rn8SolkzoE4d/e/XuzddXrxoeksnQN/P5cvlf//1F1UwmUN+Pv0sABR+AcCYMUBoKA07F9
WE1nT1Kn2NxS6ZSsrPB2bPputz5tDug8ZSq4GffqIZsbdvU5ttZqYiy2Q27L//qNrQw8O8O8iy6oeTBzv3+p7XsfHSRjg7OGPjuI1o5NMIADAmbAwAw1r4ioecc+seq4SoluoV0gvtA9pbeTWMMWbbSrfvZWYCkybR9alTKaywNLH7XmJiyRNxQ6qkdH34IVCrFrXpGBoY7N9P4Zi3t/EnMfffT48fF0c7f1lLfDwwcCAN+T16lIZ9SxIFUy1bUovmlCm0m9nIkXSfiROp/dHWiNY9fedJCXXqyLOYlJgrtX8/BVzu7sCsWfS+F14wz3ypdeuoGis4WJ5V5ugot6J98on155a98w5w8CDtdqf0WhYvpgHvgYHAa6+ZfjwPD+Dvv2mXzHPnKLzWaEw/LrNdYsD5o4/S72TG9MWhlB1bFbEKHx36CADw/SPfo0eDHsUfe7Tlo3BSO+F88nlE3o7U63jH4uVKKcYqMqDxAKhVaszrNc/aS2GMMZtXulJq7lxqZWnUSN523tL8/Kj1TqulQCo+HqhXj9ptjBEcLJ/EvvqqYSfLonVv9Gg5wDOUq6sc7llrFz5JorDx7l2qDlm3Djh+nKqFsrMpWNm+naptXn+dqqWaNKHnwssvW2fNlTF0yLkuJVv4RJXU+PE0OLttWwqkXnrJ9GOXJqoXn3+ewijhiSeozTYxkVowreXOHeCPew0QiYnKVm4lJADvvUfXP/qIAiUlNGhA1W0uLhRQiYCPVT8ajfyiBLfuMUNxKGWnDtw8gCmbpgAA5veejwntSta8+7j6YFAoTU1de6Hqly2zC7JxPvk8AK6UYpV7p/87SJydiCHNhlh7KYwxZvN0Z0rt2iWfSH7/PeDpaZ01OThQNQRAVTsAtZY5Oxt/zNmzqQLr5k2av6OP/HwKbwDjW/cEUdlirblSS5fSDoaurtSOOGoU0LkzhX0qVdnbe3gAq1bRx77/3rbmYUmS3L5nyJBzQalh58nJwJ9/0vVp0+j5uXIltYb99puyX7OICKr0c3SkjQd0OTnJoetHH9GcJGv48Uf6mRG/Uz76SLlqqf/9j2Zmde1q+s9iad26yWHexx/T58Gqn337KCz19aWKUcYMwaGUHYq6G4URa0agUFuI0WGj8Xb/t8u9XXEL34W1kKoYsnA68TS0khZBnkEI9gpWfM2s+nBQO6Ceez1rL4MxxuyCOIG8fZuCHwCYMYPmN1mTmCuVkEDBSOkTcUPVqkUnyQBVtCQkVH2frVuBtDQKs0SQYSzR+nfsGJCUZNqxDHXpElXAATQculUr/e7Xu7c8w+e556gSxhZcu0YVXy4utLuhofr0ocvz5037nH74ASgspKAkPJze16ULzTsCqP01Lc344+tatowuR40CAgLKfnziRApyY2NpVpKlabVyoP3pp1RpqVS11IkT9LUGgCVLKPRT2vjxwPz5dP2556hykFUvYsD5qFGmvcDBaiYOpexMel46hv02DCm5Kegc1Bk/Dv+xwqHkj7R4BM4Ozrh45yIu3L5Q6XF5nhRjjDGmPNG+t3gxndA2aUIzmKwtWOf1p4EDaV2mevxxqorIypJPQCvzyy/y/Uw9EQ4MpMokANiyxbRjGaKggNq78vJoZtQMA/f/ePddoHVrCtKmTbP+oHZAbt3r2NG4k8t69ehzAoADB4xbg24IM3VqyY+99RYNYI+PlwMqU6Slyc9FMeC8NFdXOXhcuBAoKjL9cQ3x77805NzTE3jqKfnny9RqKbFBAUDPY2Mq4/T19tvA4MEUNH7wgfkeh1leQYFc9fr449ZdC7NPHErZkSJtEcasG4OLdy4i2DMYf437C7WcKp4i5+3qjcGhgwFU3cJXHEoFcSjFGGOMKUVUSuXk0OUPPyg3r8UUuqFU6ZN+Y6lUFL4B9HmePl3xbdPS5PlPSrUL6e7CZynvvEM77tWuTS1K5bXqVcbVlSpvHB3ppE4MCrYmMeTclIBCVL4ZO1dq1y4gOpoG4I8dW/Jjbm
7UxgfQ5a5dRi8TAH39c3IoSBO7B5ZnyhSgbl0aBi6qQixFVHI9+ST9/njqKWWqpX7/ndoWa9Uyf1iuVstzq377jb6OrHrYtQtITaUqQ1OrXlnNxKGUHZm1YxZ2Ru1ELada+PvxvxHkGVTlffRt4ROhVJdgHnLOGGOMKUVUSgHAiy/KrU3WJnbgCwqSwxwl3HcfvVIuSTTAu6I/Pf78k15db93auBax8oi5Ujt3yoPlzenwYaqaASgYCKr6z7JydewILFhA12fMAG7dUmZ9xjJ25z1dpoZSYsD500+Xv4tX797AzJl0/bnnqDrPGJIkDzifPr3yUFF3B8D336dqLkuIjwc2bqTrIkB2cqI5UIDx1VI5OcArr9D1118vGVSbS6dOVC2l0cjtvsz+iTB9zBiaWciYoTiUsiN9G/aFu5M7fhnxCzoGdtTrPsNaDIOLgwsup1zGueRz5d7mdvZtRKdFAwA6B3VWbL2MMcZYTeflRZehobbVsjJqFIVBn35acqcxJXz4IVUA7dsnn0yXJtqlnnjC8OqiioSHUzCUnW36kO2qZGZS1YpWS1Uro0ebdrx582heUloazR4zpI1PkpRr+8vNpaHfgGmVUv360cnp2bNyW4++4uLkIebPP1/x7RYuBBo2pOH6xu7q9u+/wOXLVH30xBNV337GDMDHh+aIiSHs5vb99xTi9OxJuw8KplRLSRKFcHFx9DUUYZsliDBt1SrrB7AAfS0++wxo355C8tatgbAw+a1VK/ktLIxeXEhPt/aqbUdODu2wCPCue8x4HErZkVFhoxD9UjRGtNJ/z2YvF6/iXdIqauE7EX8CANCiTgv4uPqYvE7GGGOMkWeeAV54gbZDd3e39mpkTZsCZ86Y5yQiJESe9TNnDu0YpisuTg6NlJw/olLJVV/m3s3u//6P2o8aNgS++ML04zk6UhuZqyvt4qdPyBATQ3OO6tYFBgxQZie206dpXpK/P30fjeXnB7z6Kl2fOpWCE32tXEkhTJ8+FAJUxMMDWLGCrn/5JXDwoOHrFFVSTz0lB8iV8fKSZzC99575Z4BpNPLnWLrN1tnZ+GqpxYtpFzwHB/p6u7kpsly99OpF39uCAgrFrSk/n773s2dTgBoZSW8XL8pvly7Jbxcv0nOtTRtg2zbrrt1WbNlClYoNG5p3Jhmr3jiUsjPG7Ho2pnXlLXw85Jwxxhgzj6AgCi303ZGtunj1VRo+fv06ncTp+u03Opnv3ZtOZJSkO1fKXIHBxo3y/KiffqK5R0po2VJuB5wzB4iKKnsbSaK2wTFjaDj9J5/QTnn//EOzuTQa09Yghpx362Z6Bdubb1L1SUoKtdjp8/0oKgK+/ZauT5tW9e0HDgQmTaLrkycbFszcuiVX8unzWMKLL1Igdvas+eeXbdtGGyTUrl1+NZ4x1VI7d8qh8aefAg88oNhy9SbCtG++sd6uk8nJwP33U9WmgwP9LP37b9m3vXvltz/+oEA/Lg4YMoRedLh71zrrtxWidW/cOOWqXlnNw6FUDTC0+VC4Orri6t2riEiMKPPxY/EcSjHGGGNMOR4ecrviu+/SCaCwejVd6tMuZagHHqBqo5s3gfPnlT9+YiIFLABVKSk9I+zFF6n1LTub5imJkKmwEPj1VwqLevakk2ONhj7fzz+nqpkNG0zfjU6JIeeCszPw8890uXkzDb+vyubNFBbVqweM0LMx4NNPKQC9coV2eNPXt9/KFVlt2uh/v9q15V0WzV0tJWZrTZwob5qgy9BqqatXaXC8VkvHfPFFZderrwcfpPlSOTny5giWdOEC/SwdPkyh8vbtVC3Vr1/Zt7595bfRo6nC9OWXKYD58Udq96uoTbm6S08Htm6l67zrHjMFh1I1gIezBx5u9jCAsi18kiTJQ86DeMg5Y4wxxpTx1FM0xDsjg6pmAAqKzpyhQc2mzmEqT61acuWH0lUskkTVOHfuUAXQO+8oe3yAdihbtQrw9AQOHaKQZeFCqoaZMAE4fp
yG50+eTJU6u3dTK+GPP9L9Fy827SRfiSHnutq2pVASoLa3Gzcqv70IYSZNKrlJQGV8fOT7LVpEoWdVFWOFhXJb3PTp+j2OrlmzqOXt2DH6HpjDjRvyCX9ls7X0rZbKyAAefZTmlnXvTjv6WauyRaWSw7SvvrLsjKbt2+nzv3GDqp6OHqX2V325u9MMqkOHqLoxMZEC1HHjgNu3zbZsm7RxI7VAtmyp3IYVrGbiUKqGKG7hiyzZwncj7Qbu5NyBk9oJ7QPaW2t5jDHGGKtm1Go5IFmxAjh3Tq6SGjKEKk7MQezCp/RcqW++oZDAxYU+D31DE0M1bCh/3d59l4Z4x8fTnKd33qFZUt99V3Lo9bhx8m5ms2YB69cb9piSRG2WN29SYNBZwX1vZs+m6q6sLGp3qmjXuuvXaZ4WAEyZYthjPPIIVWpotVSB17Ilfb8q2oVx40YgIYG+pvpWZOny85PX+M475tmJ79tv6fsyYADQrFnFt9OnWkqjoVDz4kXaZW/9evM9f/X16KNUZZSeDixdapnH/PJL4OGHaaOCPn0ohG3Z0rhjde9OM9hee43a/9asoRloa9aYf9aYrfj9d7p8/HFu3WOm4VCqhni42cNwc3TD9dTrOJVwqvj9okqqfUB7uDqWUxfMGGOMMWak3r2pIkqrpbDk11/p/RMmmO8xH6bicBw9qkzlQm4uVeCIHco+/JBOps1p4kRg5Ei6Hh5Os6tu3gTeeIMCkfLMnUuzkSSJvr5iPlRVUlPpsUQr13PP6Tf0W18ODlTJ5e5OA+6XLCn/dqJyadAgmpdlqO++AxYsoLDz2jUaDN6oEVWapaWVvK0YcP7ccxTqGGPuXLrvwYMUUJk6z0tXQQENIAfKDjgvT1XVUm+8QZWDrq4UyAUEKLdWY6nVtOskQC2o2dnme6yiImq5fPFF+l00aRKwaxdQp45px3V1pefX0aMUEt+5QwHxyJElW5aro9u36WsI8K57TAFSDRMbGysBkGJjY629FIt7bO1jEt6C9MrOV4rfN3vHbAlvQZq+eboVV8YYY4yx6ioqSpKcnSWJ4hJJ8vKSpJwc8z5meDg91qpVxh+joECSli+XpKAgee0PPSRJGo1y66xMYaEkXbsmSVqtYfd5+GFaa926knT1auW3P3xYkkJC6PbOzpL0xReGPZ4hli+nx3FxkaQLF0p+LC9PkurVo49v2GDa42RmStLnn0tSgwby983TU5LmzpWkW7fosQFJcnCQJFNPB376SZLUajreuHH0nFHC2rV0zIAA/Y/57bfyfXR/vn79Vf46rF6tzPqUUlgoSU2a0NoWLzbPY6SmStLAgfQYKpUkffyxeZ7j+fmS9OabkuToSI8VGChJe/cq/zilXbggSS+/LElvvGH689kQy5bR59mxo+Ue05JqcmZgDVwpVYOU18LHO+8xxhhjzJyaNKHBwMKoUebfgl53Fz5DabW0o1RYGFWpxMcDISE0qHvTJqrwsARHR5p5Y0hbjKMjtdR06kRVGw89VP7uZlot8PHHVMkWE0OPc/gw8MIL5mvDmTIFGDyYZtA89RTNdRI2bKDKi+Bg+XtnLA8PmrMVFSUPos7MpGq3xo3lCrRHHgHq1zftsZ58ktq1xNd99OiKWwYNIWZkPfsszV/Tx1NPUeunbrXUyZPy7oSvvAKMH2/62pTk6Eg7dQL0/cnPV/b4589Tm93OnTRvbsMGqnAzx3Pc2Rl46y3gxAlqCUxIoN393nlH2So6gCLG/fupVbl1a6o0e/dden6PH0+zzsxNd9c9xkxm7VTM0mpy6pldkC25v+8u4S1Ix+KOSYWaQqnW+7UkvAXpQvKFqg/AGGOMMWaE9HRJ8venV9b//df8j3fsmFwhk5+v3320WknavFmS2reXK0vq1ZOkJUuokseeJCRIUsOG9Dl0716yciYpSZIGDZI/x3Hj6PtjCXFxkuTrS4/71lvy+/v2Lfs+pWg0krRpkyT16iV/zoAk7dql3GNs3kwVYI
AkPfigJGVlGX+sS5foOGq1JN28adh9V6yQq6WuX5ek+vXp30OGSFJRkfFrMqe8PEkKDqZ1rlihzDE1Gqq8Et+T+vUl6dQpZY6tj6wsSXrmGfm5dv/99DNpqqIiSfrzT0nq1k0+tkolSSNGyD9D4q1HD6q4Kyw0/XFLi42lxwUMf47ai5qcGVgDh1I1zLh14yS8BWnOjjnSmcQzEt6C5PmBp1SksdH/qRhjjDFWLVy6ROGAJWg0dGKub/iwf3/J0MLLS5LefZdawexVZKQk+fjQ5zNqFH1N/v2X2ooASXJ1pZYvc7XrVeS33+T2uePHaZ3i33Fx5n3sQ4ckaexYSZoxQ/k2zH/+kSR3d/pcevWSpLQ0447z8st0jGHDDL9vfr4cRorwr0UL49diKZ9/Tmtt0sT0ECU+Xm7XA6idNTFRkWUa7McfJalWLVqHn5/xQWhODrW/hobKn5eLiyQ9/7wkXbki3+7UKUl6+mlJcnKSbxcSIkmLFlEboykKCqjd9/33JalLFzp2z56mHdOW1fTMwNJUklRT9gcgcXFxaNCgAWJjY1Hf1JpdO7Th4gaMXDsSId4hmN97PqZsnoL+jfrjn6f/sfbSGGOMMcYU8+yzNCw6LIzamvLyaGh5eW9iyLKrK7Wwvfqq6UOQbcH+/cCDD9Lg7N69aRt7rRZo1QpYuxZo08Y66xo7lh6/ZUvaBW3FCmD4cGqvsmdHjlDLZHo6tVDu2GHY8yg3l1oYU1OBLVtol0pDffutvDOgtze1cjVvbvhxLCk7mwa137kD/PKL8RshbNxIP/cpKdQi/Omn1IJrzZ3hLl0Cxoyh3UdVKtpN8623qHWxMkVFtFviX3/RroFicLqvLw1tnzmTdo8sT2IisGwZvYnNHtzdaWfK9u2pbTU4mN7q1Su/JVmjAc6cAf75B/j3X/pdkpVV8jY//UQtrNVRTc8MLM2qodT+m/ux6PAinIw/iYSsBGwYuwHDWw6v8PZ7b+xF/x/7l3l/wuwEBHjot41ETX+C5Rbmwu8TP2QVZKG9f3ucSTqDV3u+ig8HfGjtpTHGGGOMKWbLFv3nEzk6ApMn0y5lwcHmXZel/fZbyVlCkyYBX3xBJ6nWkpJCgVhiovy+7dtp5z17d/o0MHAgBSxt2tAOZfrudvfTT8DTT1OIGhVFOxcaqqCAHvf6dZqpNniw4cewhvffB+bPpxlJZ88aNrstO5vm1n37Lf07PBxYvZrCV1uQm0vrE7O+evemnUjFqWh2Nn3OERH0/Dl9mkIs3RlbISG0A+jkyTQ3TR95efQ4n39O87XK4+QEBAXJIVVQEO30uW8fhaO6atcG+vWjWVkPPEChcnVV0zMDS6siozWv7IJstPdvj0kdJmHk2pF63+/yzMvwcpH3qvVzr2BvXFaGm5MbHmnxCH499yvOJJ0BwEPOGWOMMVb9DBlCQ6jv3KGqidJvrq7y9bp1qaqkOnr8ceDuXWDpUqrSeOIJa6+IqodWrgQefpj+3bgxVXRVB+HhVFXywAMUBPTpA+zeTaFCVcSA8ylTjAukABq4ffgwVWs1bWrcMaxhxgwavn/hAvD331Q5p4/jx6my6upVqkSaO5eGfjs7m3W5BnFzo+9tv370vT1wAOjQgZ7zERHAlStUwViapydV3D33HPDYY/oPvRdcXSmEnjgR2LOHKsni4ujt1i0gKYk2HLh5k95K8/Ki5+/99wP9+wPt2lluowdWs9hM+57qbZXelVKpr6bCx9XHqMfh1BP469JfGL5mePG/Y/4vBg28G1hvQYwxxhhjrMaZPp1ajL74gtomq5OoKAqmbt6kQGrOHKBBA6qOqV8f8PMreYJ/5gwFFY6OQGys/tVV1cn//gd88AHQuTO1HVbWdldQQDv2vfUWtbrVr0+VZv3LNtXYlGvXqH311KmS7w8IoEBTvHXoQDuXmjMEKiykakURUom3OnXo69ixY9VthtUVZwaWZZdPsw7LOyBfk482fm3wVt+30DOkp7WXZF
cGhQ6Cp7MnMgsyEeARgPpe/IPGGGOMMcYsa+lSmo1jK21WSmralCpiBgygSpgXXyz5cScnapcSIZWoVBkxomYGUgDwf/8HLF4MnDhBVVPe3jRLSfctKYku796V7zd2LIWbvr7WWrn+QkOpku2774CMDDmAssb33MmJgtIGXJugmKVLKSxNTKT5XV9+CXTlpqQq2VUoFegRiOUPL0fnoM7I1+Tju1Pfod+P/fDfs/+hY2DHcu+Tn5+PfJ2G3MzMTAut1na5Orri0ZaP4pezv6BrcFeorDn9jzHGGGOM1UgqFQ2ir64aNKBgavFiCqZE61RCAlWp3LhBb7qmTrXCQm1EvXrU3rZ4MfDaa1XfvnZtuu0TT1h3mLmhXFyoXZFVL2vW0Nyv5cuBbt3ouTloEHD5MlVGsorZVSjVom4LtKjbovjfPRr0QFRqFD4/+jl+HvFzufdZuHAh3n77bUst0W7M7z0fSVlJmNtjrrWXwhhjjDHGWLXk50ctabp026bi4qhdLzaWBpzbevuZuc2bR0O+c3Loa+fvT5el3/z9KZTiGUfMVnz2Gc3/mjiR/r18OW248f33+oWsNZldhVLl6RrUFQdjD1b48Xnz5mHWrFnF/7516xbCqvNLMnpqUbcFdj6509rLYIwxxhhjrEbhtqmK+fnRYHjGbEFmZiYyMjKK/+3i4gIXF5cytysoAE6epFBVUKupfffIEUus1L7ZfbYckRSBQI/ACj/u4uICLy+v4jdPT08Lro4xxhhjjDHGGGP2JiwsDN7e3sVvCxcuLPd2d+4AGg1V8Ony96eqSFY5q1ZKZRVk4drda8X/jk6NRkRiBGq71UaIdwjm7Z6HW5m38NOInwAAi48uRmOfxmjt1xp5RXn47tR3+Cf6H+x8git+GGOMMcYYY4wxpozIyEgEBwcX/7u8KilmOquGUifiT6D/j3Lj9Kyd1Gb3dPunsWr4KiRkJSAmPab44wWaAszeORu3Mm+hllMttPNvh91P7kb/xjW8+ZoxxhhjjDHGGGOK8fT0hJeXV5W3q1sXcHCg3SF1JSXV3N00DaGSJEmy9iIsKS4uDg0aNEBsbCzq169v7eUwxhhjjDHGGGPMRhiTGXTrBnTtCnz5Jf1bqwVCQoCZM3nQeVXsftA5Y4wxxhhjjDHGmLXMmgU8/TTQuTOFU4sXA9nZ8m58rGIcSjHGGGOMMcYYY4wZaexY4PZtYMECGm7eoQOwfXvZ4eesLA6lGGOMMcYYY4wxxkwwcya9McOorb0AxhhjjDHGGGOMMVbzcCjFGGOMMcYYY4wxxiyOQynGGGOMMcYYY4wxZnEcSjHGGGOMMcYYY4wxi+NQijHGGGOMMcYYY4xZHIdSjDHGGGOMMcYYY8ziOJRijDHGGGOMMcYYYxbHoRRjjDHGGGOMMcYYszgOpRhjjDHGGGOMMcaYxXEoxRhjjDHGGGOMMcYsjkMpxhhjjDHGGGOMMWZxHEoxxhhjjDHGGGOMMYvjUIoxxhhjjDHGGGOMWZyjtRdgaVqtFgCQkJBg5ZUwxhhjjDHGGGPMloisQGQHzLxqXCiVlJQEAOjatauVV8IYY4wxxhhjjDFblJSUhJCQEGsvo9pTSZIkWXsRllRUVITTp0/D398farX9dS9mZmYiLCwMkZGR8PT0tPZymA3g5wSzBfw8ZKXxc4KVxs8JVho/J5gt4OchK02r1SIpKQnh4eFwdKxxdTwWV+NCKXuXkZEBb29vpKenw8vLy9rLYTaAnxPMFvDzkJXGzwlWGj8nWGn8nGC2gJ+HjFmX/ZUKMcYYY4wxxhhjjDG7x6EUY4wxxhhjjDHGGLM4DqXsjIuLC9588024uLhYeynMRvBzgtkCfh6y0vg5wUrj5wQrjZ8TzBbw85Ax6+KZUowxxhhjjDHGGGPM4rhSijHGGGOMMcYYY4xZHIdSjDHGGGOMMcYYY8ziOJRijDHGGGOMMcYYYxbHoVQFFi5ciC5dusDT0xN+fn4YPnw4Ll++XO
I2eXl5mDFjBurUqQMPDw+MGjUKSUlJxR8/c+YMHn/8cTRo0ABubm5o1aoVlixZUuIYBw8eRM+ePVGnTh24ubmhZcuW+Pzzz6tcnyRJWLBgAQIDA+Hm5oYBAwbg6tWrJW7z/vvvo0ePHqhVqxZ8fHz0/tzPnj2L3r17w9XVFQ0aNMDHH39c4uMXLlzAqFGj0KhRI6hUKixevFjvY9uzmvqcyMvLwzPPPIO2bdvC0dERw4cPL3ObvXv3QqVSlXlLTEzU6zGY/uz9eXjjxg1MnjwZjRs3hpubG5o2bYo333wTBQUFVR5779696NixI1xcXBAa+v/t3XlM0+cfB/A3MCgWh4oil4DoBi7qgGk0dAcyD9g8mMuMN7rMA8VMzBSjsqh/bF7zCKjBZRHMNDqvybZ4bAreuEysAyriMcUxC26ZqHiUo5/fH/vRUKGl4FZs+34l/ef7PP08z1M/+cR8aJ6+hKysLKPxkydPYuTIkfD394eTkxMOHDjQbEx74Kg5odVqMWHCBISGhsLZ2RnJycmN5mRlZTWqS+7u7s3u2dbZek4AwKhRoxAUFAR3d3f4+flh8uTJuH37drOxWSdMc9S8YK14fthDDtbT6XSIiIiAk5MTLl682Gxs1iYi09iUMuHEiRNISkrCuXPn8NNPP6GmpgbDhg3Dw4cPDXPmzZuH77//Hnv27MGJEydw+/ZtvP/++4bx/Px8dO3aFdu3b4dGo8GSJUuwaNEibNy40TDHw8MDc+bMwcmTJ1FcXIzU1FSkpqbiyy+/NLu/1atXIy0tDRkZGfj555/h4eGB2NhYPHnyxDCnuroaY8aMwaxZsyw+9/379zFs2DAEBwcjPz8fa9aswbJly4z28+jRI/To0QMrV66Er6+vxbFtnaPmRF1dHdq1a4ePP/4YQ4YMMTu3pKQEWq3W8OratavF65BlbD0PL1++DL1ejy1btkCj0WD9+vXIyMjA4sWLzca9ceMGhg8fjpiYGFy8eBHJycmYNm0ajhw5Ypjz8OFDhIeHY9OmTS36TG2do+aETqeDt7c3UlNTER4ebnKep6enUV0qLS01G9ce2HpOAEBMTAx2796NkpIS7Nu3D9evX8cHH3xgNi7rhHmOmhesFc8Pe8jBeikpKfD397fo3KxNRM0QssidO3cEgJw4cUJERCorK8XV1VX27NljmFNcXCwAJC8vz2Sc2bNnS0xMjNm1Ro8eLZMmTTI5rtfrxdfXV9asWWN4VllZKQqFQnbu3NlofmZmpnTo0MHsmvU2b94snTp1Ep1OZ3i2cOFCCQsLa3J+cHCwrF+/3qLY9sZRcqKhKVOmSHx8fKPnubm5AkDu3r3b4pj0bGw5D+utXr1aQkJCzK6dkpIivXv3Nno2duxYiY2NbXI+APn222/NxrRXjpITDUVHR8vcuXMbPW9trbM39pAT2dnZ4uTkJNXV1SbnsE60jKPkRUOsFc8XW83BgwcPSq9evUSj0QgAUavVZtdmbSIyj9+UstC9e/cAAF5eXgD+6dLX1NQYfXOkV69eCAoKQl5entk49TGaolarcfbsWURHR5ucc+PGDZSXlxut3aFDBwwcONDs2pbIy8vDW2+9BTc3N8Oz2NhYlJSU4O7du88U2944Sk60REREBPz8/DB06FCcOXPGaus6MnvIw+bWBv6pTU9/Uy82Ntaq+W0rHCUnLFVVVYXg4GAEBgYiPj4eGo3mX4lrS2w9J/7++2/s2LEDKpUKrq6uJmOzTrSMo+SFpVgrrM8Wc7CiogLTp0/H119/DaVS2fwhwdpE1Bw2pSyg1+uRnJyM119/HX369AEAlJeXw83NrdG9PD4+Pibv0Tl79iy++eYbzJgxo9FYt27doFAo0L9/fyQlJWHatGkm91Mf38fHx+K1LVVeXt5k3IbrkmPlhCX8/PyQkZGBffv2Yd++fQgMDMSgQYNw4cKF/3xtR2YPeXjt2jWkp6dj5syZJuPWx24q7v379/H48WOz73UkjpQTlggLC8
PWrVuRnZ2N7du3Q6/XQ6VSoays7Jlj2wpbzomFCxfCw8MDnTt3xq1bt5CdnW32rKwTlnOkvLAEa4X12WIOigimTp2KxMRE9O/f3+KzsjYRmcemlAWSkpJQVFSEXbt2tTpGUVER4uPjsXTpUgwbNqzR+KlTp3D+/HlkZGRgw4YN2LlzJwBgx44daN++veF16tSpVu/hab179zbEfeedd/61uI6AOWEsLCwMM2fORL9+/aBSqbB161aoVCqLLpWk1rP1PPzjjz8QFxeHMWPGYPr06YbnDeMmJia2+myOiDlhLCoqCgkJCYiIiEB0dDT2798Pb29vbNmypcV7s1W2nBMLFiyAWq3Gjz/+CBcXFyQkJEBEALBOPCvmhTHWCuuzxRxMT0/HgwcPsGjRIpNzWJuIWu6Ftt7A827OnDn44YcfcPLkSXTr1s3w3NfXF9XV1aisrDTq5ldUVDS6/PvSpUsYPHgwZsyYgdTU1CbXCQkJAQD07dsXFRUVWLZsGcaPH49Ro0Zh4MCBhnkBAQHQarWGtfz8/IzWjoiIsPhsBw8eRE1NDQCgXbt2hnM1/IWL+rj1Y+R4OdFaAwYMwOnTp58pBplm63l4+/ZtxMTEQKVSNbp4tOGv2Hh6ehrO1VRt8vT0fOZctReOlhOt4erqisjISFy7dq3VMWyJredEly5d0KVLF4SGhuKVV15BYGAgzp07h6ioKNaJZ+BoedEajlYrrM1WczAnJwd5eXlQKBRG6/Tv3x8TJ07Etm3bWJuIWqON77R6bun1eklKShJ/f3+5cuVKo/H6i/j27t1reHb58uVGF/EVFRVJ165dZcGCBRavvXz5cgkODja7N19fX/niiy8Mz+7du/evXnTe8MLIRYsW8aJzcdycaMjURedNGTJkiIwePbrFa5B59pCHZWVl8vLLL8u4ceOktrbWorVTUlKkT58+Rs/Gjx/PS0LFcXOiIVOXFz+ttrZWwsLCZN68eS1ew5bYQ048rbS0VABIbm6uyTmsE+Y5al40xFrRtmw9B0tLS6WwsNDwOnLkiACQvXv3yu+//24yNmsTkXlsSpkwa9Ys6dChgxw/fly0Wq3h9ejRI8OcxMRECQoKkpycHDl//rxERUVJVFSUYbywsFC8vb1l0qRJRjHu3LljmLNx40b57rvv5MqVK3LlyhX56quv5MUXX5QlS5aY3d/KlSulY8eOkp2dLQUFBRIfHy8hISHy+PFjw5zS0lJRq9WyfPlyad++vajValGr1fLgwQOTcSsrK8XHx0cmT54sRUVFsmvXLlEqlbJlyxbDHJ1OZ4jl5+cn8+fPF7VaLVevXm3RZ2xrHDUnREQ0Go2o1WoZOXKkDBo0yPC+euvXr5cDBw7I1atXpbCwUObOnSvOzs5y9OhRSz9espCt52FZWZm89NJLMnjwYCkrKzNa35zffvtNlEqlLFiwQIqLi2XTpk3i4uIihw8fNsx58OCBITcByLp160StVktpaWmLPmNb46g5ISKGf+9+/frJhAkTRK1Wi0ajMYwvX75cjhw5ItevX5f8/HwZN26cuLu7G82xR7aeE+fOnZP09HRRq9Vy8+ZNOXbsmKhUKunZs6c8efLEZFzWCfMcNS9EWCueF7aeg0+7ceOGRb++x9pEZB6bUiYAaPKVmZlpmPP48WOZPXu2dOrUSZRKpYwePdroP9FLly5tMkbDLn1aWpr07t1blEqleHp6SmRkpGzevFnq6urM7k+v18unn34qPj4+olAoZPDgwVJSUmI0Z8qUKU2u39xfk3799Vd54403RKFQSEBAgKxcudJovL4AP/2Kjo42G9fWOXJOBAcHN/m+eqtWrZKePXuKu7u7eHl5yaBBgyQnJ6f5D5VazNbzMDMz0+QZmpObmysRERHi5uYmPXr0MDpz/XhTcadMmdJsbFvmyDnR3J6Tk5MlKChI3NzcxMfHR9599125cOFC8x+qjbP1nCgoKJCYmBjx8vIShUIh3bt3l8TERCkrK2v27K
wTpjlyXrBWPB9sPQefZmlTSoS1icgcJ5H/3wxIRERERERERERkJfz1PSIiIiIiIiIisjo2pYiIiIiIiIiIyOrYlCIiIiIiIiIiIqtjU4qIiIiIiIiIiKyOTSkiIiIiIiIiIrI6NqWIiIiIiIiIiMjq2JQiIiIiIiIiIiKrY1OKiIiIiIiIiIisjk0pIiIiIiIiIiKyOjaliIiIyOZNnToVTk5OcHJygqurK3x8fDB06FBs3boVer3e4jhZWVno2LHjf7dRIiIiIjJgU4qIiIjsQlxcHLRaLW7evIlDhw4hJiYGc+fOxYgRI1BbW9vW2yMiIiKip7ApRURERHZBoVDA19cXAQEBeO2117B48WJkZ2fj0KFDyMrKAgCsW7cOffv2hYeHBwIDAzF79mxUVVUBAI4fP44PP/wQ9+7dM3zratmyZQAAnU6H+fPnIyAgAB4eHhg4cCCOHz/eNgclIiIishNsShEREZHdevvttxEeHo79+/cDAJydnZGWlgaNRoNt27YhJycHKSkpAACVSoUNGzbA09MTWq0WWq0W8+fPBwDMmTMHeXl52LVrFwoKCjBmzBjExcXh6tWrbXY2IiIiIlvnJCLS1psgIiIiehZTp05FZWUlDhw40Ghs3LhxKCgowKVLlxqN7d27F4mJifjrr78A/HOnVHJyMiorKw1zbt26hR49euDWrVvw9/c3PB8yZAgGDBiAzz///F8/DxEREZEjeKGtN0BERET0XxIRODk5AQCOHj2KFStW4PLly7h//z5qa2vx5MkTPHr0CEqlssn3FxYWoq6uDqGhoUbPdTodOnfu/J/vn4iIiMhesSlFREREdq24uBghISG4efMmRowYgVmzZuGzzz6Dl5cXTp8+jY8++gjV1dUmm1JVVVVwcXFBfn4+XFxcjMbat29vjSMQERER2SU2pYiIiMhu5eTkoLCwEPPmzUN+fj70ej3Wrl0LZ+d/rtXcvXu30Xw3NzfU1dUZPYuMjERdXR3u3LmDN99802p7JyIiIrJ3bEoRERGRXdDpdCgvL0ddXR0qKipw+PBhrFixAiNGjEBCQgKKiopQU1OD9PR0jBw5EmfOnEFGRoZRjO7du6OqqgrHjh1DeHg4lEolQkNDMXHiRCQkJGDt2rWIjIzEn3/+iWPHjuHVV1/F8OHD2+jERERERLaNv75HREREduHw4cPw8/ND9+7dERcXh9zcXKSlpSE7OxsuLi4IDw/HunXrsGrVKvTp0wc7duzAihUrjGKoVCokJiZi7Nix8Pb2xurVqwEAmZmZSEhIwCeffIKwsDC89957+OWXXxAUFNQWRyUiIiKyC/z1PSIiIiIiIiIisjp+U4qIiIiIiIiIiKyOTSkiIiIiIiIiIrI6NqWIiIiIiIiIiMjq2JQiIiIiIiIiIiKrY1OKiIiIiIiIiIisjk0pIiIiIiIiIiKyOjaliIiIiIiIiIjI6tiUIiIiIiIiIiIiq2NTioiIiIiIiIiIrI5NKSIiIiIiIiIisjo2pYiIiIiIiIiIyOrYlCIiIiIiIiIiIqv7H/yd1ci8SExmAAAAAElFTkSuQmCC",
"text/plain": [
"
"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Create a dual-axis plot\n",
"fig, ax1 = plt.subplots(figsize=(12, 6))\n",
"\n",
"# Plotting price close on the first axis\n",
"ax1.set_xlabel(\"Date\")\n",
"ax1.set_ylabel(\"Price Close\", color=\"green\")\n",
"ax1.plot(ohlcv_df[\"price_close\"], color=\"green\")\n",
"ax1.tick_params(axis=\"y\", labelcolor=\"green\")\n",
"\n",
"# Instantiate a second axes that shares the same x-axis\n",
"ax2 = ax1.twinx()\n",
"ax2.set_ylabel(\"Volume Traded\", color=\"blue\")\n",
"ax2.plot(ohlcv_df[\"volume_traded\"], color=\"blue\")\n",
"ax2.tick_params(axis=\"y\", labelcolor=\"blue\")\n",
"\n",
"# Title and layout\n",
"plt.title(\"Price Close and Trading Volume Over Time\")\n",
"fig.tight_layout()\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "1GzZfwEqQM0L"
},
"outputs": [],
"source": [
"# Calculate 7-day and 30-day moving averages\n",
"ohlcv_df[\"7_day_MA\"] = ohlcv_df[\"price_close\"].rolling(window=7).mean()\n",
"ohlcv_df[\"30_day_MA\"] = ohlcv_df[\"price_close\"].rolling(window=30).mean()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 607
},
"id": "Yn02gj5pQcoH",
"outputId": "5ef86567-6dea-40dd-dd86-65eb5c8f89ad"
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAABKUAAAJOCAYAAABm7rQwAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3iTZdvH8W+S7j1oKaPQsjfIUkQB2SAgyhJQQFQcDNHXhY8+goq4cIGLRwRRUFEEFxtFZQkCLlRk7w3dK03u94+QSGmBtrRJCr/PcfTQJPe47vRKuXv2PM/LZBiGgYiIiIiIiIiIiBuZPT0AERERERERERG5/CgoJSIiIiIiIiIibqeglIiIiIiIiIiIuJ2CUiIiIiIiIiIi4nYKSomIiIiIiIiIiNspKCUiIiIiIiIiIm6noJSIiIiIiIiIiLidglIiIiIiIiIiIuJ2CkqJiIiIiIiIiIjbKSglIiLihRISEhg2bFipHHvmzJmYTCZ2795dKscvbe3ataNdu3aeHkapMZlMjB8/3tPDKLRL/fshIiIipUdBKRERETfasWMHd911F9WqVSMgIICwsDBat27Na6+9RmZmpqeHV+rGjx+PyWRyfQUFBVGvXj0ef/xxUlJSPD28MmP37t2u9/CZZ54pcJvBgwdjMpkICQlx8+jcz2azUbFiRUwmE4sWLfL0cERERKSQfDw9ABERkcvFN998Q79+/fD392fIkCE0aNCAnJwcVq1axUMPPcSWLVuYNm1aqY/j1ltv5eabb8bf37/Uz3Uub731FiEhIaSlpbF06VImTpzIt99+y+rVqzGZTOfdd+nSpW4apfcLCAjgo48+4vHHH8/zfHp6Ol988QUBAQGlPgZv+H58++23HDp0iISEBGbPnk23bt08PSQREREpBAWlRERE3GDXrl3cfPPNVK1alW+//ZYKFSq4Xhs5ciTbt2/nm2++cctYLBYLFovFLec6l759+1KuXDkA7r77bvr06cPnn3/OunXraNWqVYH7ZGRkEBQUhJ+fnzuH6tW6d+/O559/zq+//krjxo1dz3/xxRfk5OTQtWtXvv3221Idgzd8Pz788EOaNm3K0KFDeeyxx0hPTyc4ONitY/DEOUVERMo6le+JiIi4wQsvvEBaWhrTp0/PE5ByqlGjBvfdd995j7Fz50769etHVFQUQUFBXHXVVQUGsqZMmUL9+vUJCgoiMjKS5s2bM2fOHNfrBfWUSkhIoEePHqxatYqWLVsSEBBAtWrVmDVrVr7j//bbb7Rt25bAwEAqV67MM888w4wZMy6qT1X79u0BR/AOHH2KGjRowMaNG2nTpg1BQUE89thjrtfO7mGUlZXF+PHjqVWrFgEBAVSoUIGbbrqJHTt2uLax2+28+uqr1K9fn4CAAMqXL89dd93FqVOnLji+3377jWHDhrnKLuPi4hg+fDgnTpzIs52zPHH79u0MGzaMiIgIwsPDue2228jIyMizbXZ2Nvfffz8xMTGEhobSq1cv9u/fX6T3rVWrViQmJub5/gLMnj2brl27EhUVVeB+b775JvXr18ff35+KFSsycuRIkpKSXK+PGjWKkJCQfGMGGDhwIHFxcdhsNiD/92PlypWYTCbmzp3LxIkTqVy5MgEBAXTo0IHt27fnO94bb7xBtWrVCAwMpGXLlvz4449F6lOVmZnJ/Pnzufnmm+nfvz+ZmZl88cUXrtdfeuklTCYTe/bsybfvuHHj8PPzyzMHfvrpJ7p27Up4eDhBQUG0bduW1atX59nP+X3+888/GTRoEJGRkVxzzTVA4eeK871q3rw5AQEBVK9enXfeecd17LN9+OGHNGvWjMDAQKKiorj55pvZt29fnm22bdtGnz59iIuLIyAggMqVK3PzzTeTnJxcqPdSRETE3RSUEhERcYOvvvqKatWqcfXVVxdr/yNHjnD11VezZMkS7r33XiZOnEhWVha9evVi/vz5ru3+97//MWbMGOrVq8
err77KhAkTaNKkCT/99NMFz7F9+3b69u1Lp06dmDx5MpGRkQwbNowtW7a4tjlw4ADXXXcdW7ZsYdy4cdx///3Mnj2b1157rVjX5eQMHkVHR7ueO3HiBN26daNJkya8+uqrXHfddQXua7PZ6NGjBxMmTKBZs2ZMnjyZ++67j+TkZP744w/XdnfddRcPPfSQq4fXbbfdxuzZs+nSpQtWq/W841u2bBk7d+7ktttuY8qUKdx88818/PHHdO/eHcMw8m3fv39/UlNTmTRpEv3792fmzJlMmDAhzzZ33HEHr776Kp07d+a5557D19eX66+/vtDvmdPAgQP5+OOPXeM4fvw4S5cuZdCgQQVuP378eEaOHEnFihWZPHkyffr04Z133qFz586u92HAgAGkp6fnC3pmZGTw1Vdf0bdv3wtm2z333HPMnz+fBx98kHHjxrFu3ToGDx6cZ5u33nqLUaNGUblyZV544QWuvfZaevfuXaTg3JdffklaWho333wzcXFxtGvXjtmzZ7te79+/vytIdra5c+fSuXNnIiMjAUcZYJs2bUhJSeHJJ5/k2WefJSkpifbt27N+/fp8+/fr14+MjAyeffZZ7rzzTqDwc2Xz5s107dqVEydOMGHCBG6//XaeeuopFixYkO88EydOZMiQIdSsWZOXX36ZsWPHsmLFCtq0aeMKJubk5NClSxfWrVvH6NGjeeONNxgxYgQ7d+7ME3AUERHxKoaIiIiUquTkZAMwbrjhhkLvU7VqVWPo0KGux2PHjjUA48cff3Q9l5qaaiQmJhoJCQmGzWYzDMMwbrjhBqN+/frnPfaMGTMMwNi1a1ee8wHGDz/84Hru6NGjhr+/v/F///d/rudGjx5tmEwmY/Pmza7nTpw4YURFReU7ZkGefPJJAzC2bt1qHDt2zNi1a5fxzjvvGP7+/kb58uWN9PR0wzAMo23btgZgvP322/mO0bZtW6Nt27aux++9954BGC+//HK+be12u2EYhvHjjz8agDF79uw8ry9evLjA58+WkZGR77mPPvoo33vmvL7hw4fn2fbGG280oqOjXY9/+eUXAzDuvffePNsNGjTIAIwnn3zyvOPZtWuXARgvvvii8ccff+SZG2+88YYREhJipKenG0OHDjWCg4Nd+x09etTw8/MzOnfu7JozhmEYU6dONQDjvffeMwzD8b5VqlTJ6NOnT57zzp07N981n/39+O677wzAqFu3rpGdne16/rXXXjMA4/fffzcMwzCys7ON6Ohoo0WLFobVanVtN3PmTAPIc8zz6dGjh9G6dWvX42nTphk+Pj7G0aNHXc+1atXKaNasWZ791q9fbwDGrFmzXNdcs2ZNo0uXLq55YxiO731iYqLRqVMn13PO7/PAgQPzjaewc6Vnz55GUFCQceDAAddz27ZtM3x8fIwzb9F3795tWCwWY+LEiXmO+fvvvxs+Pj6u5zdv3mwAxqeffnqOd0pERMT7KFNKRESklDlXlQsNDS32MRYuXEjLli1dJUIAISEhjBgxgt27d/Pnn38CEBERwf79+9mwYUORz1GvXj2uvfZa1+OYmBhq167Nzp07Xc8tXryYVq1a0aRJE9dzUVFR+TJgLqR27drExMSQmJjIXXfdRY0aNfjmm28ICgpybePv789tt912wWPNmzePcuXKMXr06HyvOcugPv30U8LDw+nUqRPHjx93fTVr1oyQkBC+++67854jMDDQ9f9ZWVkcP36cq666CoBNmzbl2/7uu+/O8/jaa6/lxIkTrrmwcOFCAMaMGZNnu7Fjx17gavOrX78+jRo14qOPPgJgzpw53HDDDXneS6fly5eTk5PD2LFjMZv/vQ288847CQsLc2VGmUwm+vXrx8KFC0lLS3Nt98knn1CpUqU88/Bcbrvttjz9ppxzyzmffv75Z06cOMGdd96Jj8+/bU4HDx7syly6kBMnTrBkyRIGDhzoeq5Pnz75MqMGDBjAxo0b85RzfvLJJ/j7+3PDDTcA8M
svv7Bt2zYGDRrEiRMnXHMkPT2dDh068MMPP2C32/Oc/+zvMxRurthsNpYvX07v3r2pWLGia/saNWrka9L++eefY7fb6d+/f565GxcXR82aNV1zNzw8HIAlS5YUWHYpIiLijRSUEhERKWVhYWEApKamFvsYe/bsoXbt2vmer1u3rut1gEceeYSQkBBatmxJzZo1GTlyZL5+OOdSpUqVfM9FRkbm6bezZ88eatSokW+7gp47n3nz5rFs2TJWrlzJ9u3b+eOPP2jWrFmebSpVqlSoJto7duygdu3aeQIbZ9u2bRvJycnExsYSExOT5ystLY2jR4+e9xwnT57kvvvuo3z58gQGBroCakCB/XrOfi+dQRbne7lnzx7MZjPVq1fPs11B3+PCGDRoEJ9++inbt29nzZo15yzdc86Ts8/j5+dHtWrV8vRdGjBgAJmZmXz55ZcApKWlsXDhQvr163fBFRKhcO8B5J87Pj4+JCQkXPD44AgsWa1WrrjiCrZv38727ds5efIkV155ZZ4Svn79+mE2m/nkk08AMAyDTz/9lG7durk+n9u2bQNg6NCh+ebIu+++S3Z2dr7vtXMOnKkwc+Xo0aNkZmYW6rO0bds2DMOgZs2a+cb1119/ueZuYmIiDzzwAO+++y7lypWjS5cuvPHGG+onJSIiXk2r74mIiJSysLAwKlasmKe/UWmpW7cuW7du5euvv2bx4sXMmzePN998k//+97/5ehqd7Vw9gowCeiZdrDZt2rhW3zuXMzNOLpbdbic2NjZPoOJMMTEx592/f//+rFmzhoceeogmTZoQEhKC3W6na9eu+bJnwL3vJTj6So0bN44777yT6OhoOnfufNHHvOqqq0hISGDu3LkMGjSIr776iszMTAYMGFCo/d3xHji/n61bty7w9Z07d1KtWjUqVqzItddey9y5c3nsscdYt24de/fu5fnnn3dt6/w+vvjii3kyAc8UEhKS53FBc7Soc+VC7HY7JpOJRYsWFfienjmmyZMnM2zYML744guWLl3KmDFjmDRpEuvWraNy5cpFPreIiEhpU1BKRETEDXr06MG0adNYu3YtrVq1KvL+VatWZevWrfme//vvv12vOwUHBzNgwAAGDBhATk4ON910ExMnTmTcuHEEBAQU/yJOn6egFdQKes5dqlevzk8//YTVasXX1/ec2yxfvpzWrVsXOdh16tQpVqxYwYQJE/jvf//ret6ZWVMcVatWxW63u7K8nAr6HhdGlSpVaN26NStXruSee+45Z9aYc55s3bqVatWquZ7Pyclh165ddOzYMc/2/fv357XXXiMlJYVPPvmEhIQEVynaxXKOZfv27Xma2Ofm5rJ7924aNWp03v137drFmjVrGDVqFG3bts3zmt1u59Zbb2XOnDk8/vjjgCPz695772Xr1q188sknBAUF0bNnT9c+zqy1sLCwfO9DYRV2rsTGxhIQEFCoz1L16tUxDIPExERq1ap1wTE0bNiQhg0b8vjjj7NmzRpat27N22+/zTPPPFOsaxIRESlNKt8TERFxg4cffpjg4GDuuOMOjhw5ku/1HTt2nHcFu+7du7N+/XrWrl3rei49PZ1p06aRkJBAvXr1APItO+/n50e9evUwDOOCK8wVRpcuXVi7di2//PKL67mTJ0+eMwPJHfr06cPx48eZOnVqvtecWTn9+/fHZrPx9NNP59smNzf3vKuTObNTzs7wefXVV4s9ZmffoNdff73EjvnMM8/w5JNPFthby6ljx474+fnx+uuv57me6dOnk5ycnG/1vwEDBpCdnc3777/P4sWL6d+/f7HHd7bmzZsTHR3N//73P3Jzc13Pz549O0/J6Lk459zDDz9M375983z179+ftm3b5pmXffr0wWKx8NFHH/Hpp5/So0cPgoODXa83a9aM6tWr89JLL+Xpo+V07NixC46psHPFYrHQsWNHFixYwMGDB13Pb9++nUWLFuXZ9qabbsJisTBhwoR8xzUMw/WZT0
lJyfM+giNAZTabyc7OvuDYRUREPEGZUiIiIm5QvXp15syZw4ABA6hbty5DhgyhQYMG5OTksGbNGj799FOGDRt2zv0fffRRPvroI7p168aYMWOIiori/fffZ9euXcybN8/VtLpz587ExcXRunVrypcvz19//cXUqVO5/vrrL6rRutPDDz/Mhx9+SKdOnRg9ejTBwcG8++67VKlShZMnTxaq11BJGzJkCLNmzeKBBx5g/fr1XHvttaSnp7N8+XLuvfdebrjhBtq2bctdd93FpEmT+OWXX+jcuTO+vr5s27aNTz/9lNdee42+ffsWePywsDDatGnDCy+8gNVqpVKlSixdupRdu3YVe8xNmjRh4MCBvPnmmyQnJ3P11VezYsWKi8o4a9u2bb6MobPFxMQwbtw4JkyYQNeuXenVqxdbt27lzTffpEWLFtxyyy15tm/atCk1atTgP//5D9nZ2YUu3SsMPz8/xo8fz+jRo2nfvj39+/dn9+7dzJw5k+rVq19wLs2ePZsmTZoQHx9f4Ou9evVi9OjRbNq0iaZNmxIbG8t1113Hyy+/TGpqar5rMZvNvPvuu3Tr1o369etz2223UalSJQ4cOMB3331HWFgYX3311XnHVJS5Mn78eJYuXUrr1q255557sNlsTJ06lQYNGuQJ+lavXp1nnnmGcePGsXv3bnr37k1oaCi7du1i/vz5jBgxggcffJBvv/2WUaNG0a9fP2rVqkVubi4ffPABFouFPn36nHfcIiIinqKglIiIiJv06tWL3377jRdffJEvvviCt956C39/fxo1asTkyZO58847z7lv+fLlWbNmDY888ghTpkwhKyuLRo0a8dVXX+XJbrnrrruYPXs2L7/8MmlpaVSuXJkxY8a4SpguVnx8PN999x1jxozh2WefJSYmhpEjRxIcHMyYMWMuujywOCwWCwsXLmTixInMmTOHefPmER0dzTXXXEPDhg1d27399ts0a9aMd955h8cee8zVUPuWW245Z08ipzlz5jB69GjeeOMNDMOgc+fOLFq0KM/KaUX13nvvERMTw+zZs1mwYAHt27fnm2++OWeQpaSMHz+emJgYpk6dyv33309UVBQjRozg2WefLbD8ccCAAUycOJEaNWrQtGnTEh3LqFGjMAyDyZMn8+CDD9K4cWO+/PLLC86lTZs28ffff/PEE0+cc5uePXsyevRoPvzwQ9e4BwwYwPLlywkNDaV79+759mnXrh1r167l6aefZurUqaSlpREXF8eVV17JXXfdVahrKuxcadasGYsWLeLBBx/kiSeeID4+nqeeeoq//vrLVZbr9Oijj1KrVi1eeeUVV2+4+Ph4OnfuTK9evQBo3LgxXbp04auvvuLAgQMEBQXRuHFjFi1aVGIllyIiIiXNZJRWx00RERG5bIwdO5Z33nmHtLS0cza4FikMu91OTEwMN910E//73/88PRy36927N1u2bLmonmUiIiJlhXpKiYiISJFkZmbmeXzixAk++OADrrnmGgWkpEiysrLy9UmaNWsWJ0+epF27dp4ZlBud/Vnatm0bCxcuvCyuXUREBJQpJSIiIkXUpEkT2rVrR926dTly5AjTp0/n4MGDrFixgjZt2nh6eFKGrFy5kvvvv59+/foRHR3Npk2bmD59OnXr1mXjxo34+fl5eoilqkKFCgwbNoxq1aqxZ88e3nrrLbKzs9m8eTM1a9b09PBERERKnXpKiYiISJF0796dzz77jGnTpmEymWjatCnTp09XQEqKLCEhgfj4eF5//XVOnjxJVFQUQ4YM4bnnnrvkA1IAXbt25aOPPuLw4cP4+/vTqlUrnn32WQWkRETksqFMKRERERERERERcTv1lBIREREREREREbdTUEpERERERERERNzususpZbfbOXjwIKGhoZhMJk8PR0RERERERETkkmIYBqmpqVSsWBGz+dz5UJddUOrgwYPEx8d7ehgiIiIiIiIiIpe0ffv2Ubly5XO+ftkFpUJDQw
HHGxMWFubh0Vwcq9XK0qVL6dy5M76+vp4ejpQxmj9SVmnuSnFp7sjF0PyRi6H5I2WV5q4UV0pKCvHx8a4YzLlcdkEpZ8leWFjYJRGUCgoKIiwsTD8gpMg0f6Ss0tyV4tLckYuh+SMXQ/NHyirNXblYF2qbpEbnIiIiIiIiIiLidgpKiYiIiIiIiIiI2ykoJSIiIiIiIiIibnfZ9ZQSERERERGRy4/NZsNqtXp6GGWK1WrFx8eHrKwsbDabp4cjXsTX1xeLxXLRx1FQSkRERERERC5ZhmFw+PBhkpKSPD2UMscwDOLi4ti3b98FG1bL5SciIoK4uLiLmhsKSomIiIiIiMglyxmQio2NJSgoSMGVIrDb7aSlpRESEoLZrO4/4mAYBhkZGRw9ehSAChUqFPtYCkqJiIiIiIjIJclms7kCUtHR0Z4eTpljt9vJyckhICBAQSnJIzAwEICjR48SGxtb7FI+zSoRERERERG5JDl7SAUFBXl4JCKXHufn6mJ6tSkoJSIiIiIiIpc0leyJlLyS+FwpKCUiIiIiIiIiIm6noJSIiIiIiIhIGWQymViwYEGJHGv8+PE0adKkRI5VGtq1a8fYsWM9PQwpYQpKiYiIiIiIiHiZw4cPM3r0aKpVq4a/vz/x8fH07NmTFStWlMr5HnzwwVI79pnGjx+PyWTCZDLh4+NDQkIC999/P2lpaefd7/PPP+fpp58u9fGJe2n1PREREREREREvsnv3blq3bk1ERAQvvvgiDRs2xGq1smTJEkaOHMnff/9d4ucMCQkhJCSkxI9bkPr167N8+XJyc3NZvXo1w4cPJyMjg3feeSfftjk5Ofj5+REVFeWWsYl7KVNKRERERERExIvce++9mEwm1q9fT58+fahVqxb169fngQceYN26defc7/fff6d9+/YEBgYSHR3NiBEj8mQgrVy5kpYtWxIcHExERAStW7dmz549QP7yvWHDhnHjjTcyZcoUKlWqRHR0NCNHjsyz0tqhQ4e4/vrrCQwMJDExkTlz5pCQkMCrr7563uvz8fEhLi6OypUrM2DAAAYPHsyXX36ZZxzvvvsuiYmJBAQEAPnL97Kzs3nkkUeIj4/H39+fGjVqMH36dNfrf/zxB926dSMkJITy5ctz6623cvz48Qu+9+JeypQSERERERGRy4JhGFjtnjm3r7lwq5WdPHmSxYsXM3HiRIKDg/O9HhERUeB+6enpdOnShVatWrFhwwaOHj3KHXfcwahRo5g5cya5ubn07t2bO++8k48++oicnBzWr19/3jGtXLmS6OhoVqxYwc6dOxkwYABNmjThzjvvBGDIkCEcP36clStX4uvrywMPPMDRo0cL94acITAwkJycHNfj7du3M2/ePD7//HMsFkuB+wwZMoS1a9fy+uuv07hxY3bt2uUKOiUlJdG+fXvuuOMOXnnlFTIzM3nkkUfo378/3377bZHHJ6VHQSkRERERERG5LFjt8PJvJzxy7gcaReNXcHwlj+3bt2MYBnXq1CnS8efMmUNWVhazZs1yBbOmTp1Kz549ef755/H19SU5OZkePXpQvXp1AOrWrXveY0ZGRvLiiy8SGRlJvXr1uP7661mxYgV33nknf//9N8uXL2fDhg00b94cgHfffZeaNWsWadwbN25kzpw5tG/f3vVcTk4Os2bNIiYmpsB9/vnnH+bOncuyZcvo2LEjANWqVXO9PnXqVK644gqeffZZ13Pvvfce8fHx/PPPP9SqVatIY5TSo/I9ERERESmSbJsdwzA8PQwRkUtScX++/vXXXzRu3DhPdlXr1q2x2+1s3bqVqKgohg0bRpcuXejZsyevvfYahw4dOu8x69WrlydTqUKFCq5MqK1bt+Lj40PTpk1dr9eoUYPIyMgLjvX3338nJCSEwMBAWrZsSatWrZg6darr9apVq54zIAXwyy+/YLFYaNu2bYGv//rrr3z33XeuPlkhISGuIN+OHTsuOD5xH2VKiYiIiEih7U+zMn
tbMq3KB9KmYv6yEhERb+ZrdmQseerchVGzZk1MJlOpNDOfMWMGY8aMYfHixXzyySc8/vjjLFu2jKuuuqrA7X19ffM8NplM2O0XX/9Yu3ZtvvzyS3x8fKhYsSJ+fn55Xi+obPFMgYGB5309LS3NlSF2tgoVKhR9wFJqPJopNWnSJFq0aEFoaCixsbH07t2brVu3nnefzz//nObNmxMREUFwcDBNmjThgw8+cNOIRURERC5vf57KxgB2pORccFsREW9jMpnws3jmqzD9pACioqLo0qULb7zxBunp6fleT0pKKnC/unXr8uuvv+bZZ/Xq1ZjNZmrXru167oorrmDcuHGsWbOGBg0aMGfOnKK9iafVrl2b3NxcNm/e7Hpu+/btnDp16oL7+vn5UaNGDRISEvIFpAqjYcOG2O12vv/++wJfb9q0KVu2bCEhIYEaNWrk+bpQwEvcy6NBqe+//56RI0eybt06li1bhtVqpXPnzgV+8JyioqL4z3/+w9q1a/ntt9+47bbbuO2221iyZIkbRy4iIiJyedqT5lh16WS2TSV8IiKl5I033sBms9GyZUvmzZvHtm3b+Ouvv3j99ddp1apVgfsMHjyYgIAAhg4dyh9//MF3333H6NGjufXWWylfvjy7du1i3LhxrF27lj179rB06VK2bdt2wb5S51KnTh06duzIiBEjWL9+PZs3b2bEiBEEBgYWOgBXXAkJCQwdOpThw4ezYMECdu3axcqVK5k7dy4AI0eO5OTJkwwcOJANGzawY8cOlixZwm233YbNZivVsUnReLR8b/HixXkez5w5k9jYWDZu3EibNm0K3Kddu3Z5Ht933328//77rFq1ii5dupTWUEVEREQue2lWOyeyHDfzVjukWO2EF6Zrr4iIFEm1atXYtGkTEydO5P/+7/84dOgQMTExNGvWjLfeeqvAfYKCgliyZAn33XcfLVq0ICgoiD59+vDyyy+7Xv/77795//33OXHiBBUqVGDkyJHcddddxR7nrFmzuP3222nTpg1xcXFMmjSJLVu2EBAQUOxjFtZbb73FY489xr333suJEyeoUqUKjz32GAAVK1Zk9erVPPLII3Tu3Jns7GyqVq1K165dMZvVWtubmAwv+hPX9u3bqVmzJr///jsNGjS44PaGYfDtt9/Sq1cvFixYQKdOnS64T0pKCuHh4SQnJxMWFlYSw/YYq9XKwoUL6d69e75aX5EL0fyRskpzV4pLc+fi/XUqmy92p7oeD6geRmJY0csuyiLNH7kYmj+ek5WVxa5du0hMTHRLoORSY7fbSUlJISwsrFDBnP379xMfH8/y5cvp0KGDG0YonnS+z1dhYy9e0+jcbrczduxYWrdufcGAVHJyMpUqVSI7OxuLxcKbb755zoBUdnY22dnZrscpKSmA4x8Gq9VachfgAc7xl/XrEM/Q/JGySnNXiktz5+LtSs7O8/hoRg6VA0u3RMNbaP7IxdD88Ryr1YphGNjt9hJp0H25ceawON/Ds3377bekpaXRsGFDDh06xKOPPkpCQgLXXHON3u/LgN3uWI3XarXmWaURCv/zzmsype655x4WLVrEqlWrqFy58nm3tdvt7Ny5k7S0NFasWMHTTz/NggUL8pX2AYwfP54JEybke37OnDkEBQWV1PBFRERELnk7KjQlxzcQ/5x0sv2CiUw9RNypnZ4elojIOfn4+BAXF0d8fHyxGmrL+a1YsYLHH3+cPXv2EBISQsuWLXn22WepUqWKp4cmbpCTk8O+ffs4fPgwubm5eV7LyMhg0KBBF8yU8oqg1KhRo/jiiy/44YcfSExMLPL+d9xxB/v27Suw2XlBmVLx8fEcP378kijfW7ZsGZ06dVIasBSZ5o+UVZq7UlyaOxcnzWrnna2pmIB2cQF8dziLKsEW+iWGeHpobqH5IxdD88dzsrKy2LdvHwkJCSrfKwbDMEhNTSU0NLTUm5dL2ZOVlcXu3buJj48vsHyvXLly3l2+ZxgGo0ePZv
78+axcubJYASlwZE6dGXg6k7+/P/7+/vme9/X1vWT+QbiUrkXcT/NHyirNXSkuzZ3iOZiaBUD5QB/iw/zhcBYnc4zL7r3U/JGLofnjfjabDZPJhNlsVoPrYnCW4DnfQ5Ezmc1mTCZTgT/bCvuzzqNBqZEjRzJnzhy++OILQkNDOXz4MADh4eEEBgYCMGTIECpVqsSkSZMAmDRpEs2bN6d69epkZ2ezcOFCPvjgg3OuQCAiIiJyOXMmxV/sX7j3pjl6Q1QJ9SXK39E3Is1qJ9tmx9+iX1RERESk6DwalHIGks7uBTVjxgyGDRsGwN69e/NEZNPT07n33nvZv38/gYGB1KlThw8//JABAwa4a9giIiIiZUJWrp3Pd6WSZrUztHb4RQWP9qSeDkqF+BLgYybYx0R6rsHJLBsVghWUEhERkaLzePnehaxcuTLP42eeeYZnnnmmlEYkIiIicmnIzLXzyfYUDmc6Go/+k5RDw+ji9VNJybGRlGPHBMSHOG4fowN8SE+zciLbRoVglSOJiIhI0enPWiIiIiKXmMxcOx9vT3YFpAD+Tiq4/2ZhOLOk4oJ8XNlWzhK+E1m2ixipiIiIXM4UlBIRERG5hGTk2vloezJHMm0E+Zi4MTEUgF0pVjJz7cU6prOfVNWQfzOiogMcQamT2QpKiYiISPEoKCUiIiJyiciw2vloWzJHM20E+5gYVCOc2hH+xARYsAP/JOcU67h7zmhy7uQMSilTSkREPGXmzJlERER4ehhyERSUEhEREbkEpFvtzNmezLEsGyE+ZgbVDKdcoKP/U91IfwD+PlX0Er6kbBspOXbMQOUzekc5y/dOZduwF6JPqIiIFF5CQgImkynf18iRI8+735nbBgcHU7NmTYYNG8bGjRvdMm7nudetW5fn+ezsbKKjozGZTPn6Rl+MAQMG8M8//5TY8S4kMzOTqKgoypUrR3Z28cvi5V8KSomIiIiUcWmnA1LHs2yE+DoCUtEB/65nUyfCEZTanWolo4glfM7SvQrBPvhZTK7nw/3M+JjAZkByTvHKAkVEpGAbNmzg0KFDrq9ly5YB0K9fvwvuO2PGDA4dOsSWLVt44403SEtL48orr2TWrFmlPWwA4uPjmTFjRp7n5s+fT0hISImfKzAwkNjY2BI/7rnMmzeP+vXrU6dOHRYsWFDq57NaraV+Dk9TUEpERESkDEu12pizLZkTWTZCfc0MrhlO1OnSOqeoAAvlAy0YOFbhKwpnk/MqIXlX2DOZTK7zqIRPRKRkxcTEEBcX5/r6+uuvqV69Om3btr3gvhEREcTFxZGQkEDnzp357LPPGDx4MKNGjeLUqVMAnDhxgoEDB1KpUiWCgoJo2LAhH330kesYs2bNIjo6Ol82UO/evbn11lvPe/6hQ4fy8ccfk5mZ6XruvffeY+jQofm2/f3332nfvj2BgYFER0czYsQI0tLSAFi6dCkBAQEkJSXl2ee+++6jffv2QP7yvfHjx9OkSRM++OADEhISCA8P5+abbyY1NdW1TWpqKoMHDyY4OJgKFSrwyiuv0K5dO8aOHXve6wKYPn06t9xyC7fccgvTp093PT9t2jQqVqyI3Z73jzQ33HADw4cPdz3+4osvaNq0KQEBAVSrVo0JEyaQm/vvoiQmk4m33nqLXr16ERwczMSJE7HZbNx+++0kJiYSGBhI7dq1ee211/KcJzc3lzFjxhAREUF0dDSPPPIIQ4cOpXfv3q5t7HY7kyZNch2ncePGfPbZZxe85tKmoJSIiIhIGZWa4whIncy2EXY6QyrS31Lgts4Svr+KUMJnGEaBTc6dol0r8OXme01ExBsZhkF6TrpHvoxiljrn5OTw4YcfMnz4cEwm04V3KMD9999PamqqK+MqKyuLZs2a8c033/DHH38wYsQIbr31VtavXw84MrJsNhtffvml6xhHjx7lm2++yRNkKUizZs1ISEhg3rx5AOzdu5cffvghXzArPT2dLl26EBkZyYYNG/j000
9Zvnw5o0aNAqBDhw5ERES4jgNgs9n45JNPGDx48DnPv2PHDhYsWMDXX3/N119/zffff89zzz3nev2BBx5g9erVfPnllyxbtowff/yRTZs2XfA93LFjB2vXrqV///7079+fH3/8kT179rjerxMnTvDdd9+5tj958iSLFy92jfXHH39kyJAh3Hffffz555+88847zJw5k4kTJ+Y5z/jx47nxxhv5/fffGT58OHa7ncqVK/Ppp5/y559/8t///pfHHnuMuXPnuvZ5/vnnmT17NjNmzGD16tWkpKTky+SaNGkSs2bN4u2332bLli3cf//93HLLLXz//fcXvPbS5HPhTURERETE26Tk2PhoezKnsu2E+ZkZVCOciHMEpMBRwrfyYAZ706ykW+0E+174b5NJOXZSrXbMJqhUQFDKlSmlFfhEpIzIsGYQMqnky8gKI21cGsF+wUXeb8GCBSQlJTFs2LBin7tOnToA7N69G4BKlSrx4IMPul4fPXo0S5YsYe7cubRs2ZLAwEAGDRrEzJkz6dKlCwAffvghVapUoV27dhc83/Dhw3nvvfe45ZZbmDlzJt27dycmJibPNnPmzCErK4tZs2YRHOx4X6ZOnUrPnj15/vnnKV++PDfffDNz5szh9ttvB2DFihUkJSXRp0+fc57bbrczc+ZMQkMdq8/eeuutrFixgokTJ5Kamsr777/PnDlz6NChA+Aod6xYseIFr+m9996jW7duREZGAtClSxdmzJjB+PHjiYyMpFu3bnmO+9lnn1GuXDmuu+46ACZMmMCjjz7qyhirVq0aTz/9NA8//DBPPvmk6zyDBg3itttuy3PuCRMmuP4/MTGRtWvXMnfuXPr37w/AlClTGDduHDfeeKPrfVy4cKFrn+zsbJ599lmWL19Oq1atXOdftWoV77zzTqEy8EqLMqVEREREypgcm8GcbY6AVHghAlIAEf4W4oJ8MICtSYXLltp7unSvYpAPvub8f5139q06qfI9EZFSM336dLp165YncHL33XcTEhLi+roQZ5aWM9PKZrPx9NNP07BhQ6KioggJCWHJkiXs3bvXtc+dd97JsmXLOHjwIOAolRs2bFihsrVuueUW1q5dy86dO5k5c2aB2VV//fUXjRs3dgWkAFq3bo3dbmfr1q0ADB48mJUrV7rGMHv2bK6//vrzrriXkJDgCkgBVKhQgaNHjwKwc+dOrFYrLVu2dL0eHh5O7dq1z3s9NpuN999/n1tuuSXPNc6cOdNVsjd48GDmzZvnKnmcPXs2N998M2azI+zy66+/8tRTT+X5vt15550cOnSIjIwM13GbN2+e7/xvvPEGzZo1IyYmhpCQEKZNm+b6XiUnJ3PkyJE812SxWGjWrJnr8fbt28nIyKBTp055zj9r1ix27Nhx3msvbcqUEhERESlj9qZZScqxE+xjYlDNcML9zh+Qcqob4cfhjFz+TsqhaUzgBbffc7p0r0po/iwp+HcFPmVKiUhZEeQbRNq4NI+du6j27NnD8uXL+fzzz/M8/9RTT+XJdLqQv/76C3Bk2QC8+OKLvPbaa7z66qs0bNiQ4OBgxo4dS07Ov30Hr7jiCho3bszHH39Mz5492bJlC998802hzhcdHU2PHj24/fbbycrKolu3bnn6OhVWixYtqF69Oh9//DH33HMP8+fPZ+bMmefdx9c3fw/Es3s9FdWSJUs4cOAAAwYMyPO8zWZjxYoVdOrUiZ49e2IYBt988w0tWrTgxx9/5JVXXnFtm5aWxoQJE7jpppvyHT8gIMD1/2cG6QA+/vhjHnzwQSZPnkyrVq0IDQ3lxRdf5Keffir0+J19ur755hsqVaqU5zV/f/9CH6c0KCglIiIiUsYcynAEixLD/AodkAKoE+nPd6dL+NKsdkLOU8JnGIYrU6qgflLwb1AqM9cgI9dOkI+S8EXEu5lMpmKV0HnKjBkziI2N5frrr8/zfGxsbJFWnXv11VcJCwujY8eOAKxevZobbrjBlfljt9
v5559/qFevXp79hg8fzquvvsqJEyfo2LEj8fHxhT7n8OHD6d69O4888ggWS/5/q+rWrcvMmTNJT093BWJWr16N2WzOk7k0ePBgZs+eTeXKlTGbzfnei6KoVq0avr6+bNiwgSpVqgCOTKN//vmHNm3anHO/6dOnc/PNN/Of//wnz/MTJ05k+vTpdOrUiYCAAG666SZmz57N9u3bqV27Nk2bNnVt27RpU7Zu3UqNGjWKNObVq1dz9dVXc++997qeOzO7KTw8nPLly7NhwwbXNdhsNjZt2kSTJk0AqFevHv7+/uzdu9ejpXoFUVBKREREpIw5nOFoLB4XVLRbuXA/CxWDfDiYkcvWpGyanSdb6mS2jbRcOxYTVAouOCjlZzER5msmxWrnZJaNoBAFpURESordbmfGjBkMHToUH5/C/7xPSkri8OHDZGdn888///DOO++wYMECZs2a5Sp7q1mzJp999hlr1qwhMjKSl19+mSNHjuQLSg0aNIiHH36Yd999l1mzZhVp/F27duXYsWOEhYUV+PrgwYN58sknGTp0KOPHj+fYsWOMHj2aW2+9lfLly+fZbvz48UycOJG+ffteVGZPaGgoQ4cO5aGHHiIqKorY2FiefPJJzGbzOcsSjx07xldffcWXX35JgwYN8rw2ZMgQbrzxRk6ePElUVBSDBw+mR48ebNmyJU+pH8B///tfevToQZUqVejbty9ms5lff/2VP/74g2eeeeacY65ZsyazZs1iyZIlJCYm8sEHH7BhwwZX1hs4eoJNmjSJGjVqUKdOHaZMmcKpU6dc1xQaGsqDDz7I/fffj91u55prriE5OZnVq1cTFhZW4MqI7qI7BxEREZEyxDAMV1CqQhGDUuDIloILr8LnXHWvUrAvPgX0k3KKVrNzEZFSsXz5cvbu3XvB1e7Odtttt1GhQgXq1KnDPffcQ0hICOvXr2fQoEGubR5//HGaNm1Kly5daNeuHXFxcfTu3TvfscLDw+nZsychISEFvn4+JpOJcuXK4efnV+DrQUFBLFmyhJMnT9KiRQv69u1Lhw4dmDp1ap7tatSoQcuWLfntt9/Ou+peYb388su0atWKHj160LFjR1q3bk3dunXzlNCdydmI3dnA/EwdOnQgMDCQDz/8EID27dsTFRXF1q1b87zf4GiM/vXXX7N06VJatGjBVVddxSuvvELVqlXPO9677rqLm266iQEDBnDllVdy4sSJPFlTAI888ggDBw5kyJAhtGrVipCQELp06ZLnmp5++mmeeOIJJk2aRN26denatSvffPNNnuCWJ5iM4q5LWUalpKQQHh5OcnLyOSO2ZYXVamXhwoV07949X92syIVo/khZpbkrxXWpzJ3UHBtvbDmFCXigcXSBDcjPJyXHxptbTgEwsn4koeco/1uwK4W/k3K4Ji6Iayqcuw/Ksv1pbDyWRcvYQNpXKjslMUV1qcwf8QzNH8/Jyspi165dJCYmnjPoIOdmt9u57rrraNSoEVOmTPH0cEpFeno6lSpVYvLkya5V/so6u91O3bp16d+/P08//XSpned8n6/Cxl5UviciIiJShhw6nSVVLsBS5IAUQJifhcrBPuxPdzQ8bxGbv4TPMAxXptS5mpw7RTubnWflFnksIiLivU6dOsW3337LqlWrePvttz09nBKzefNm/v77b1q2bElycjJPPfUUADfccIOHR1Z8e/bsYenSpbRt25bs7GymTp3Krl278mVreSMFpURERETKkOL2kzpTnQj/00Gp7AKDUieybGTkGviYoOIFzuMs3zup8j0RkUvKFVdcwalTpxg/fnyexuOXgpdeeomtW7fi5+dHs2bN+PHHHylXrpynh1VsZrOZmTNn8uCDD2IYBg0aNGD58uXUrVvX00O7IAWlRERERMqQi+kn5VQ70o/lB9I5kJ5LSo6NsLNK+PYUsp8UQHSAYxxJ2XZy7cYFtxcRkbJh9+7d2O12UlJSPD2UEnXFFVewceNGTw+jRMXHx7
N69WpPD6NY1OhcREREpIwwDIPDmRefKRXqayE+xLH/30k5+V53lu5VvUDpHkCwjwl/swkDSFK2lIiIiBSBglIiIiIiZUSK1U5GroEZiA28uIT3OhEFr8JnGAZ7U0/3kwq5cFDKZDIRpRX4REREpBgUlBIREREpI5ylezGBlosuk6sd4Y8JR+P0MzOcjmXZyLQZ+JqhQnDhAl9RrmbnCkqJiIhI4SkoJSIiIlJGlESTc6cQXzPxpzOhtib9my3lzJKqHOyLxVS4wJez2bmCUiIiIlIUCkqJiIiIlBGHSjAoBVA30g+Av07921fK2eS8MKV7TlqBT0RERIpDQSkRERGRMsAwjDNW3it8wOh8aoc7SvgOZ+ZyKtuGYRjsK0KTc6foM8r3DMMokbGJiIjIpU9BKREREZEyIDnHTpbNwGKCcqczky5WkK/ZFXz6+1Q2RzJtZNkM/MymImVjRfhbMAE5doO0XHuJjE1ERKQkDBs2jN69e3t6GHIOCkqJiIiIlAH/Njn3uegm52eq61yFLymbvaezpOJDfDAXsp8UgI/ZRIS/47ZSfaVERC7eW2+9RaNGjQgLCyMsLIxWrVqxaNGiPNtkZWUxcuRIoqOjCQkJoU+fPhw5cuS8x125ciUmkwmTyYTZbCY8PJwrrriChx9+mEOHDpXmJeU5f2RkJFlZWXle27Bhg2tsJem1115j5syZJXrM81m7di0Wi4Xrr7/ebecsyxSUEhERESkDDrlK90qmn5RTrQg/TMDRTBu/nXD8glCUflJO0f6OcZ1UUEpE5KJVrlyZ5557jo0bN/Lzzz/Tvn17brjhBrZs2eLa5v777+err77i008/5fvvv+fgwYPcdNNNhTr+1q1bOXjwIBs2bOCRRx5h+fLlNGjQgN9//720LimP0NBQ5s+fn+e56dOnU6VKlRI/V3h4OBERESV+3HOZPn06o0eP5ocffuDgwYOlei7DMMjNzS3Vc5Q2BaVEREREyoCSXHnvTIE+ZhJOl/AdPx1QqlKEflJOUc4V+NTsXETkovXs2ZPu3btTs2ZNatWqxcSJEwkJCWHdunUAJCcnM336dF5++WXat29Ps2bNmDFjBmvWrHFtcz6xsbHExcVRq1Ytbr75ZlavXk1MTAz33HOPa5sNGzbQuXNnqlevTmRkJG3btmXTpk2u14cPH06PHj3yHNdqtRIbG8v06dPPe/6hQ4fy3nvvuR5nZmby8ccfM3To0Hzbzps3j/r16+Pv709CQgKTJ092vfbYY49x5ZVX5tuncePGPPXUU0D+8r127doxZswYHn74YaKiooiLi2P8+PF59v/777+55pprCAgIoF69eixfvhyTycSCBQvOe11paWl88skn3HPPPVx//fV5MrQGDRrEgAED8mxvtVopV64cs2bNAsButzNp0iQSExMJDAykcePGfPbZZ67tnZlmixYtolmzZvj7+7Nq1Sp27NjBDTfcQPny5QkJCaFFixYsX748z7kOHTrE9ddfT2BgIImJicyZM4eEhAReffVV1zZJSUnccccdxMTEEBYWRvv27fn111/Pe80XS0EpERERES9nGAaHM08HpQJLNigFUDfS3/X//hYT5YtxDtcKfMqUEhFvZhiQnu6Zr2IuBGGz2fj4449JT0+nVatWAGzcuBGr1UrHjh1d29WpU4cqVaqwdu3aIp8jMDCQu+++m9WrV3P06FEAUlNTGTJkCIsWLWLNmjXUrFmT7t27k5qaCsAdd9zB4sWL85T9ff3112RkZOQLvpzt1ltv5ccff2Tv3r2AI/CUkJBA06ZN82y3ceNG+vfvz80338zvv//O+PHjeeKJJ1zBnsGDB7N+/Xp27Njh2mfLli389ttvDBo06Jznf//99wkODuann37ihRde4KmnnmLZsmWA4/3u3bs3QUFB/PTTT0ybNo3//Oc/hXof586dS506dahduza33HIL7733nmsBkMGDB/PVV1+Rlpbm2n7JkiVkZGRw4403AjBp0i
RmzZrF22+/zZYtW7j//vu55ZZb+P777/Oc59FHH+W5557jr7/+olGjRqSlpdG9e3dWrFjB5s2b6dq1Kz179nS9vwBDhgzh4MGDrFy5knnz5jFt2jTX99qpX79+HD16lEWLFrFx40aaNm1Khw4dOHnyZKGuvzhK/q5GRERERErUqWw72c4m54El0+T8TLXC/VhsArsB8cG+Reon5XTmCnwiIl4rIwNCQjxz7rQ0CA4u9Oa///47rVq1Iisri5CQEObPn0+9evUAOHz4MH5+fvnK0sqXL8/hw4eLNbw6deoAsHv3bmJjY2nfvj12u52UlBTCwsKYNm0aERERfP/99/To0YOrr76a2rVr88EHH/Dwww8DMGPGDPr160fIBd7j2NhYunXrxsyZM/nvf//Le++9x/Dhw/Nt9/LLL9OhQweeeOIJAGrVqsWff/7Jiy++yLBhw6hfvz6NGzdmzpw5rm1mz57NlVdeSY0aNc55/kaNGvHkk08CULNmTaZOncqKFSvo1KkTy5YtY8eOHaxcuZK4uDgAJk6cSKdOnS74Hk6fPp1bbrkFgK5du5KcnMz3339Pu3bt6NKlC8HBwcyfP59bb70VgDlz5tCrVy9CQ0PJzs7m2WefZfny5a7gY7Vq1Vi1ahXvvPMObdu2dZ3nqaeeyjOeqKgoGjdu7Hr89NNPM3/+fL788ktGjRrF33//zfLly9mwYQPNmzcH4N1336VmzZqufVatWsX69es5evQo/v6OP1a99NJLLFiwgM8++4wRI0Zc8PqLQ5lSIiIiIl7OmSVVPtAHSwk3gAUI8DFTLcwPgMSwopfuwb+ZUilWOzm24mUDiIjIv2rXrs0vv/zCTz/9xD333MPQoUP5888/C71//fr1CQkJISQkhG7dul1we2dGj7PR+JEjRxgxYgTNmjUjMjKSsLAw0tLS8mTf3HHHHcyYMcO1/aJFiwoMLhVk+PDhzJw5k507d7J27VoGDx6cb5u//vqL1q1b53mudevWbNu2DZvN8UeQwYMHM2fOHNc1fPTRRwUe60yNGjXK87hChQqurKGtW7cSHx/vCkgBtGzZ8oLXs3XrVtavX8/AgQMB8PHxYcCAAa5SRh8fH/r378/s2bMBSE9P54svvnCNdfv27WRkZNCpUyfX9y0kJIRZs2blyQQDXIElp7S0NB588EHq1q1LREQEISEh/PXXX67v1datW/Hx8cmTiVajRg0iIyNdj3/99VfS0tJcjfOdX7t27cp3/pKkTCkRERERL1da/aTO1C0+hO3hOTSM8r/wxgUI9DET6GMiM9fgZLatVMcqIlJsQUGOjCVPnbsI/Pz8XNk+zZo1Y8OGDbz22mu88847xMXFkZOTQ1JSUp5sqSNHjriCKQsXLsRqdayqGhgYeMHz/fXXXwAkJCQAjr5PJ06cYNKkSdStW5fAwEBatWpFTk6Oa58hQ4bw6KOPsnbtWtasWUNiYiLXXnttoa6vW7dujBgxgttvv52ePXsSHR1dqP3ONnDgQB555BE2bdpEZmYm+/btu2D5oK9v3j/AmEwm7HZ7sc7vNH36dHJzc6lYsaLrOcMw8Pf3Z+rUqYSHhzN48GDatm3L0aNHWbZsGYGBgXTt2hXAVdb3zTffUKlSpTzHdmYuOQWflXH34IMPsmzZMl566SVq1KhBYGAgffv2zfO9upC0tDQqVKjAypUr871Wmo3idbcgIiIi4uUOZTh+qSjNQE+wr5nG0QEXdYxofwv7c3M5maWglIh4KZOpSCV03sRut5OdnQ04glS+vr6sWLGCPn36AI5smL1797pKv6pWrVroY2dmZjJt2jTatGlDTEwMAKtXr2bq1Kl07tyZsLAwDhw4wPHjx/PsFx0dTe/evZkxYwZr167ltttuK/Q5fXx8GDJkCC+88AKLFi0qcJu6deuyevXqPM+tXr2aWrVqYbE4MnQrV65M27ZtmT17NpmZmXTq1InY2NhCj+NstWvXZt++fRw5coTy5csDjqbv55
Obm8usWbOYPHkynTt3zvNa7969+eijj7j77ru5+uqriY+P55NPPmHRokX069fPFSCrV68e/v7+7N27N0+pXmGsXr2aYcOGuXpTpaWlsXv37jzXlJuby+bNm2nWrBngyMw6deqUa5umTZty+PBhfHx8XIFJd9DdgoiIiIgXMwyDIxmOEoUKXh7oiQ6wsD89lxPZuUDxMq5ERATGjRtHt27dqFKlCqmpqcyZM4eVK1eyZMkSAMLDw7n99tt54IEHiIqKIiwsjNGjR9OqVSuuuuqqCx7/6NGjZGVlkZqaysaNG3nhhRc4fvw4n3/+uWubmjVr8uGHH1KnTh3sdjuPPPJIgRlXd9xxBz169MBmsxW4et75PP300zz00EPnzJL6v//7P1q0aMHTTz/NgAEDWLt2LVOnTuXNN9/Ms93gwYN58sknycnJ4ZVXXinSGM7WqVMnqlevztChQ3nhhRdITU3l8ccfB/4tbTzb119/zalTp7j99tsJDw/P81qfPn2YPn06d999N+BYhe/tt9/mn3/+4bvvvnNtFxoayoMPPsj999+P3W7nmmuuITk5mdWrVxMWFnbe97ZmzZp8/vnn9OzZE5PJxBNPPJEn86tOnTp07NiRESNG8NZbb+Hr68v//d//ERgY6Lqmjh070qpVK3r37s0LL7xArVq1OHjwIN988w033nhjvpLBkqKeUiIiIiJe7GS2jRy7gY/p375N3irKXyvwiYiUhKNHjzJkyBBq165Nhw4d2LBhA0uWLMnT3PqVV16hR48e9OnThzZt2hAXF5cnqHQ+tWvXpmLFijRr1oznnnuOjh078scff7gaqYOjHC0pKYl27doxdOhQxowZU2AGUseOHalQoQJdunTJU7pWGH5+fpQrV+6cwZ6mTZsyd+5cPv74Yxo0aMB///tfnnrqKYYNG5Znu759+3LixAkyMjLo3bt3kcZwNovFwoIFC0hLS6NFixbccccdrtX3AgIKziiePn06HTt2zBeQAkdQ6ueff+a3334DHAG0P//8k0qVKuXrl/X000/zxBNPuEomu3btyjfffENiYuJ5x/zyyy8TGRnJ1VdfTc+ePenSpUu+lQxnzZpF+fLladOmDTfeeCN33nknoaGhrmsymUwsXLiQNm3acNttt1GrVi1uvvlm9uzZ48oYKw0mwyjmupRlVEpKCuHh4SQnJxMWFubp4VwUq9XKwoUL6d69e76aWJEL0fyRskpzV4qrrM6dP05m8fWeNCoF+3BrrQhPD+e8tifn8NnOFGIDLQyvE3nhHcqQsjp/xDto/nhOVlYWu3btIjEx8ZwBBTm3M1ffM5sLzmlJS0ujUqVKzJgxg5tuusnNI3SP1atXc80117B9+3aqV6/u6eGUiP379xMfH8/y5cvp0KFDsY5xvs9XYWMv3p0DLiIiInKZc0eT85LizOQ6mWXDMIxz/uVbRETKPrvdzvHjx5k8eTIRERH06tXL00MqMfPnzyckJISaNWuyfft27rvvPlq3bl2mA1LffvstaWlpNGzYkEOHDvHwww+TkJBAmzZtPDou77+7EREREbmMOYNS3t5PCiDcz4zFBLkGJOfYifD37nJDEREpvr1795KYmEjlypWZOXMmPj7e/+9UYaWmpvLII4+wd+9eypUrR8eOHZk8ebKnh3VRrFYrjz32GDt37iQ0NJSrr76a2bNnezx789KZNSIiIiKXGLthcCSz7GRKmU0mIv0tHM+ycTLbpqCUiMglLCEhgUu1G9CQIUMYMmSIp4dRorp06UKXLl08PYx81OhcRERExEudyLJhtYOf2eRqIu7tnCV8J7JsfLfrOzYcOP8y2iIiInL5UlBKRERExEs5S/fKB1kwl5H+TNGng2dfbV1A+1ntufLdKxm/cjx2w36BPUVESs+lmtEj4kkl8bnyaFBq0qRJtGjRgtDQUGJjY+nduzdbt2497z7/+9//uPbaa4mMjCQyMpKOHTuyfv16N41YRERExH0OOZucB3p/6Z
5TVICFUxl7mfzjPQAYGEz4fgLXz7meExknPDw6EbncOPvlZGRkeHgkIpce5+fqYvpSefQO5/vvv2fkyJG0aNGC3NxcHnvsMTp37syff/5JcHBwgfusXLmSgQMHcvXVVxMQEMDzzz9P586d2bJlC5UqVXLzFYiIiIiUnrK08p5TqK+NjzaOIMOaTMtKLbm72d2MXDiSxdsX02xaMz7r/xnNKzb39DBF5DJhsViIiIjg6NGjAAQFBWll0CKw2+3k5OSQlZWF2axCK3EwDIOMjAyOHj1KREQEFkvxWwx49A5n8eLFeR7PnDmT2NhYNm7ceM5lCWfPnp3n8bvvvsu8efNYsWLFJdeITERERC5fdsPgaKZz5T3ProxTFK+u+S/7Tm0kwDecWb0/ona5ajSr2Iw+c/uw/eR2Wr/XmindpnBn0zv1i6GIuEVcXByAKzAlhWcYBpmZmQQGBupntuQTERHh+nwVl1f92S05ORmAqKioQu+TkZGB1Wot0j4iIiIi3u54lo1cA/zNJiL9y8Zfp7/+52teXfcyAH2bvE5oYDwAjco3YsOdGxi2YBhfbP2Cu76+i7X71/Jm9zcJ9A305JBF5DJgMpmoUKECsbGxWK1WTw+nTLFarfzwww+0adPmokq05NLj6+t7URlSTl4TlLLb7YwdO5bWrVvToEGDQu/3yCOPULFiRTp27Fjg69nZ2WRnZ7sep6SkAI4PV1n/geQcf1m/DvEMzR8pqzR3pbjK2tzZn5oDQGygmdzcXA+P5sL2pexj6IKhAHSpNYL6FbpzND2HGD/H68GWYD656RMmr5vMEyufYOYvM9l0cBOf9PmE6pHVPTjywilr80e8i+aP9yiJX6IvJ3a7ndzcXCwWi947ycNut2O3n3sRk8L+vDMZXrIMwT333MOiRYtYtWoVlStXLtQ+zz33HC+88AIrV66kUaNGBW4zfvx4JkyYkO/5OXPmEBQUdFFjFhERESkthyKrkRRagaiU/ZRP2uPp4ZxXrpHL49sf5+/0v6keWJ2xLT8hLbwq0cn7iU3OP/bfUn9j8p7JJOcmE2QOYmzVsbQMb+mBkYuIiEhpyMjIYNCgQSQnJxMWFnbO7bwiKDVq1Ci++OILfvjhBxITEwu1z0svvcQzzzzD8uXLad783M0yC8qUio+P5/jx4+d9Y8oCq9XKsmXL6NSpk1Ippcg0f6Ss0tyV4iprc2f2jjQOZ9q4Pj6QOuF+nh7OeT323WO8tPYlwvzDWD98PUn2Snx3KIsaoT7cULXgxWsOpB5g4OcDWXdgHQCPXv0oE9pO8NqeJWVt/oh30fyRskpzV4orJSWFcuXKXTAo5dHyPcMwGD16NPPnz2flypWFDki98MILTJw4kSVLlpw3IAXg7++Pv79/vud9fX0vmQ/VpXQt4n6aP1JWae5KcZWFuWOzGxzLsgFQOTQAX1/vLZlYtG0RL619CYDpvaZTO7Y2u1JygCxO5RjnfK8TohL4/rbveXDpg0xZP4Xn1jxHeGA4j17zqBtHX3RlYf6I99L8kbJKc1eKqrDzxaNdM0eOHMmHH37InDlzCA0N5fDhwxw+fJjMzEzXNkOGDGHcuHGux88//zxPPPEE7733HgkJCa590tLSPHEJIiIiIiXuWJYNmwH+FhMRft7b5PxAygGGLHCsfnxv83vpW68vANEBjiDaqWwbtvMk5ftZ/Hi92+tM6TYFgP98+x+W7VhWyqMWERERb+HRu5y33nqL5ORk2rVrR4UKFVxfn3zyiWubvXv3cujQoTz75OTk0Ldv3zz7vPTSS564BBEREZESdzjD0di8QpCP15az5dpzGThvIMczjtMkrgmTu0x2vRbqa8bXDHYgKdt2wWONbDGS26+4HbthZ+C8gexO2l16AxcRERGv4fHyvQtZuXJlnse7d+8uncGIiIiIeAlnUCouyGsWSs5n/Mrx/Lj3R0L8Qpjbdy4BPgGu10wmE1H+Fo5k2jiZbSM64PzXYTKZmNp9Kr8e+Z
WfD/5Mn7l9WHXbKgJ9A0v7MkRERMSDvDcfXEREROQydSjDsYyytwallu1YxrM/PgvA/3r+j5rRNfNt4wxEnci6cKYUQIBPAPP6z6NcUDk2HdrEvQvvLdQfMEVERKTsUlBKRERExIvkntHkvIIXBqWOpB3hlvm3YGAwoukIbm5wc4HbOftKFTYoBVAlvAof9/kYs8nMzF9m8s7Gd0pkzCIiIuKdFJQSERER8SLHMnOxGxBoMRHm6323atM3T+do+lEaxDbg1a6vnnO7KH9HUOpkIXpKnalDtQ481+E5AMYsGsPafWuLPVYRERHxbt53pyMiIiJyGTuc+W8/KW9scv7l1i8BGN1y9Hl7PhUnU8rpwasfpG+9vljtVvp+2pcjaUeKN1gRERHxagpKiYiIiHiRQ2esvOdtjqQdYf2B9QD0qNXjvNuGns7yyrIZ2OxF6w1lMpl4r9d71C1Xl4OpB+n/WX+sNmvxBi0iIiJeS0EpERERES/izSvvfbPtGwwMmlVoRsXQiufdNsBiwpnnlWkresPyUP9Q5g+YT6hfKD/s+YFHlj9SjBGLiIiIN1NQSkRERMRLWO0GxzId5W7eGJRylu71qt3rgtuaTCYCLI6wVGauvVjnq12uNrNunAXAK+te4aPfPyrWcURERMQ7KSglIiIi4iWOZuZiAME+Jlf5m7fIys1i2c5lAPSs1bNQ+wT6OK4hM7fomVJOvev05rFrHgPgjq/u4Pcjvxf7WCIiIuJdvOtuR0REROQydmbpnrc1Of9217dkWDOoHFaZJnFNCrVPoM/pTClb8TKlnJ667ik6V+9MhjWDGz+5kaSspIs6noiIiHgHBaVEREREvMTJbEfpXrkA7yvd+2rrVwD0qNmj0AGzQMvFZ0oBWMwW5tw0h4SIBHac2sET3z5xUccTERER76CglIiIiIiXSDodlIr0t3h4JHkZhsFX/ziCUoXpJ+XkypQqZk+pM0UHRfNuz3cB+N+m/3Eo9dBFH1NEREQ8S0EpERERES+RlO0I3kT4e9ct2ubDmzmQeoBg32CuS7yu0Pu5ekoVY/W9grRPbM/V8VeTbcvmpTUvlcgxRURExHO8645HRERE5DJlGAZJOY5MqQg/78qUcpbudareiQCfgELvF3iRq++dzWQy8UQbR+ne2xvf5lj6sRI5roiIiHiGglIiIiIiXiDVasdmOG7Owvy86xbNWbpX2FX3nEpi9b2zdaneheYVm5NhzeDltS+X2HFFRETE/bzrjkdERETkMuUs3QvzM2P2opX3DqQcYOOhjZgwcX3N64u0b0mtvncmk8nEf9v8F4CpG6ZyMvNkiR1bRERE3EtBKREREREv4Czd87Ym51//8zUAV1a+kvIh5Yu0b0mtvne2HrV60CSuCWk5aby27rUSPbaIiIi4j4JSIiIiIl7AufJehJcFpYpbugclu/remUwmE49f+zgAr/30GslZySV6fBEREXEPBaVEREREvMApZ1DKi/pJZVgzWLFrBVDcoJTjWrJsBoZRstlSN9a9kfox9UnOTmbK+iklemwRERFxD++56xERERG5jCXlOLKJvClTavnO5WTlZlE1vCoNYhsUeX/n6nsGjsBUSTKbzPzn2v8A8Mq6V0jNTi3R44uIiEjpU1BKRERExAu4yvf8vCco9eXWLwFHlpSpGM3XLWYTfmZnCV/JBqUA+tfvT63oWpzMPMlbP79V4scXERGR0qWglIiIiIiHZdnsZJ7OJIrw947bM7thdzU571W7V7GPUxor8DlZzBYeu+YxACavnUyGNaPEzyEiIiKlxzvuekREREQuY0nZjoBNkI8Jf4t33J79fPBnjqQfIdQvlLYJbYt9nNJagc9pUMNBJEYkcjT9KNM2TiuVc4iIiEjp8I67HhEREZHLWFKO95XufbXVsepelxpd8LP4Ffs4pbUCn5OvxZdx14wD4IXVL5CVm1Uq5xEREZGSp6CUiIiIiIe5+kl5UZPzL//5t5/UxXCuwJdZwo
3OzzS0yVDiw+I5lHaI6Zuml9p5REREpGQpKCUiIiLiYc7yPW/pJ7UnaQ+/HfkNs8lM95rdL+pYpZ0pBeBn8ePRax4F4PnVz5Njyym1c4mIiEjJ8Y47HxEREZHLmLN8L9JLyvecDc6vjr+ackHlLupYpd1Tymn4FcOpEFKBfSn7eP+X90v1XCIiIlIyFJQSERER8bBTXla+99U/jn5SF1u6B6W7+t6ZAnwCeLj1wwBMWjUJq81aqucTERGRi6eglIiIiIgH2QyDlBzvKd9LzU7lu93fASUVlHJPphTAiGYjiA2OZVfSLub8PqfUzyciIiIXx/N3PiIiIiKXsZQcOwbgY4IQH8/fmi3dsZQcWw7VI6tTp1ydiz5eoKX0e0o5BfkG8X+t/g+AiT9OxGa3lfo5RUREpPg8f+cjIiIichk7c+U9k8nk4dH8W7rXq3avEhmPO1bfO9M9ze8hKjCKbSe38cmWT9xyThERESkeBaVEREREPMjZ5Dzcz/O3ZTa7jW+2fQOUTOke5F19zzBKPzAV6h/K/VfdDziypexG6WdoiYiISPF4/u5HRERE5DJ2KtsRNIn0gibnPx34ieMZxwn3D+eaKteUyDGdq+/ZDLC6KT40uuVowvzD+PPYn3y19Sv3nFRERESKTEEpEREREQ9K8qKV977c+iUA3Wp2w9fiWyLH9DXD6bZSpb4Cn1N4QDj3NL8HcKzE544MLRERESk6BaVEREREPMhZvhfh5/mglLOfVEmV7gGYTCa3rsDnNPaqsfhb/PnpwE+s3L3SbecVERGRwlNQSkRERMRDDMMgyVW+59nbsp2ndvLnsT+xmCx0q9GtRI/tzhX4nOJC4hh+xXDAkS0lIiIi3kdBKREREREPycw1yLE7sofCPZwp5ey9dG3Va4kMjCzRY3siUwrgoasfwmKysGznMjYe3OjWc4uIiMiFKSglIiIi4iHO0r1QXzM+ZpNHx7Jo+yIAetTsUeLHdq3A56aeUk6JkYnc3OBmQNlSIiIi3khBKREREREPOeVqcu7ZWzKrzcqqvasA6FitY4kf37kCn7szpQAeveZRAD7/63P+Pv63288vIiIi56aglIiIiIiHJOU4Moc83eR8w8ENpFvTiQ6MpmH5hiV+fE9lSgE0iG1Az1o9MTB4YfULbj+/iIiInJuCUiIiIiIeknQ6UyrS37NBqe92fQdA24S2mE0lf3voqZ5STuOuGQfAh799yL7kfR4Zg4iIiOTn0aDUpEmTaNGiBaGhocTGxtK7d2+2bt163n22bNlCnz59SEhIwGQy8eqrr7pnsCIiIiIlzFW+5+FMqZV7VgJwXcJ1pXJ8T6y+d6ZW8a1oW7UtVruVl9e+7JExiIiISH4eDUp9//33jBw5knXr1rFs2TKsViudO3cmPT39nPtkZGRQrVo1nnvuOeLi4tw4WhEREZGSlews3/NgT6ns3GxW710NQLuEdqVyDk9nSsG/2VLTNk3jeMZxj41DRERE/uXjyZMvXrw4z+OZM2cSGxvLxo0badOmTYH7tGjRghYtWgDw6KOPlvoYRUREREqD1W6QanUGpTyXKbX+wHoyczOJCYqhfkz9UjmHJ3tKOXWu3pkr4q5g8+HNTPlpChOum+CxsYiIiIiDR4NSZ0tOTgYgKiqqxI6ZnZ1Ndna263FKSgoAVqsVq9VaYufxBOf4y/p1iGdo/khZpbkrxeVtc+dElqN0z88MPvZcrFaTR8axfMdyANpUaUNubm6pnMPXcFxrZq7do+//Q60eYtD8QUxZP4X7WtxHqH9ooff1tvkjZYvmj5RVmrtSXIWdMybDMDyXR30Gu91Or169SEpKYtWqVYXaJyEhgbFjxzJ27NhzbjN+/HgmTMj/l7A5c+YQFBRU3OGKiIiIXJTUgEj2x9bDPyeNaod/9dg4Ht/+OH+k/cFdle+iW7lupXIOm9mHfypfCUCdvWsw4ZnbT5thY/TfozmYfZBhFYfRO7a3R8YhIiJyqcvIyGDQoE
EkJycTFhZ2zu28JlNq5MiR/PHHH4UOSBXWuHHjeOCBB1yPU1JSiI+Pp3Pnzud9Y8oCq9XKsmXL6NSpE76+vp4ejpQxmj9SVmnuSnF529zZdDyb/YezqFIugu5Nu3tkDFm5WWz7fRsAI7uPpE65OqVyHsMweGVLCgbQtlMXQnw910PreKXj3LXwLpamLGXKLVPw9/Ev1H7eNn+kbNH8kbJKc1eKy1mldiFeEZQaNWoUX3/9NT/88AOVK1cu0WP7+/vj75//ZsPX1/eS+VBdStci7qf5I2WV5q4Ul7fMnRSbo71AVIDnxrP6wGqybdnEhcTRIK4BJlPplRAGWExk2gxyTRZ8fT13Czqs6TCe+vEpDqQe4KM/P+LOZncWaX9vmT9SNmn+SFmluStFVdj54tHV9wzDYNSoUcyfP59vv/2WxMRETw5HRERExG2Ssh19ljy58t53u74DHKvulWZACrxjBT4AP4sf/9fq/wB4Yc0L2Ow2j45HRETkcubRoNTIkSP58MMPmTNnDqGhoRw+fJjDhw+TmZnp2mbIkCGMGzfO9TgnJ4dffvmFX375hZycHA4cOMAvv/zC9u3bPXEJIiIiIsWSlONYiS7Sz3Mr73232xGUui7hulI/lzeswOd0Z7M7iQqMYvvJ7Xz252eeHo6IiMhly6NBqbfeeovk5GTatWtHhQoVXF+ffPKJa5u9e/dy6NAh1+ODBw9yxRVXcMUVV3Do0CFeeuklrrjiCu644w5PXIKIiIhIkRmGcUamlGeCUhnWDNbtXwe4KShl8Y5MKYAQvxDGtBwDwKRVk/CSdX9EREQuOx7tKVWYG4CVK1fmeZyQkKAbBxERESnT0qx2bAaYgDA/z/yNcO2+tVjtViqFVqJGVI1SP58rUyrX85lSAKOvHM2La17k1yO/snj7YrrVLJ2VB0VEROTcPJopJSIiInI5OnW6dC/cz4y5lHs5nYurdC/xulLvJwVn9JSyeccfF6MCo7ir2V2AI1tKRERE3E9BKRERERE383TpHvwblGpXtZ1bzhdo8a5MKYAHWj2An8WPH/f+yNIdSz09HBERkcuOglIiIiIibuYMSkV6KCiVlpPG+gPrAUemlDt4y+p7Z6oUVomRLUYC8PCyh7Eb3hMwExERuRwoKCUiIiLiZs6V9yI81E9q9d7V5NpzqRJehcSIRLec05tW3zvTf679D+H+4fx65Fdm/zbb08MRERG5rCgoJSIiIuJmni7fW7l7JeBYdc8d/aTAu1bfO1N0UDTjrhkHwH++/Q9ZuVkeHpGIiMjlQ0EpERERETc7lXM6KOXnmaCUq8l5gntK98D7Vt8705grx1A5rDL7UvYx5acpnh6OiIjIZUNBKRERERE3yrbZXdlCEf7uvxVLzU7l54M/A9AuoZ3bzuvsKZVlMzAM78qWCvQN5Onrngbg2VXPcjLzpIdHJCIicnlQUEpERETEjZKyHZlCgT4m/C3uvxX7ce+P2AwbiRGJVI2o6rbzOlffM3AEprzNrY1upWFsQ5Kyknj2x2c9PRwREZHLgoJSIiIiIm7kLN2L9FTp3i73l+4BWMwm/MzOEj7vC0pZzBae7/g8AFPWT2F30m7PDkhEROQyoKCUiIiIiBsle7rJ+Z6VAFyX6N6gFHjvCnxOXWt0pX1ie3JsOTzx3ROeHo6IiMglT0EpERERETc6dbp8L8LP/bdhyVnJbDq0CXBvPyknb12Bz8lkMvFCxxcA+PC3D9l8aLOHRyQiInJpU1BKRERExI2ScjyXKfXDnh+wG3ZqRNWgclhlt5/fm1fgc2pWsRkDGwwE4JHlj3h4NCIiIpc2BaVERERE3CjJg+V73+32TD8pJ+cKfJle2Oj8TBPbT8TX7MuynctYumOpp4cjl5GsXDt7UnO8boVKEZHSoqCUiIiIiJvYDYPkHEeWUKQHyvc8H5Ty/kwpgMTIREa2GAnAw8sexm5493jl0rH8QDofbU9h9eFMTw9FRMQtFJQSERERcZOUHD
sGYDFBiK97b8NOZp7k18O/Ap7pJwXe31PqTI+3eZxw/3B+PfIrs3+b7enhyGXAMAx2p1oBWHM4g8MZuR4ekYhI6VNQSkRERMRNTp1Rumcymdx67h/2/ICBQZ1ydagQWsGt53by9tX3zhQdFM2j1zwKwOPfPU5WbpaHRySXuhSrnTSr47NhB77Zk0qu3fsDuCIiF0NBKRERERE3cTU590Tp3i5H6V67qu3cfm6nspQpBXDflfdROawye5P38ubPb3p6OHKJO5DuyIyK8rcQ5GPiWJaN1YczPDwqEZHSpaCUiIiIiJskZTuyIDza5DzRM/2koOz0lHIK9A3kqXZPAfDcmudIzU318IjkUnYg3VG6lxjmS5f4EADWHcnk4OnnRUQuRQpKiYiIiLiJM1Mq0s+9Qalj6cf4/ejvgOf6ScEZq++VkUwpgCGNh9AgtgFJWUnMOzLP08ORS9iBNEemVKVgX2pH+FM/0h8D+GZPGlaV8YnIJUpBKRERERE3ObOnlDv9sOcHAOrH1Cc2ONat5z7TmT2lysqS9xazhec7Pg/A18e/Zk/yHg+PSC5FOTaDI5nOoJQPAB0rBxPiY+ZEto0fD6mMT0QuTQpKiYiIiLiBYRgku8r33HsL5irdS/Bc6R7821PKZoC1bFTwAdCtRjfaVGlDrpGr3lJSKg5n5GIAob5mwk6vzBnoY6ZrFUcZ3/qjmexLUxmfiFx6FJQSERERcYNMm0H26RKccDeX73lDPykAXzNYTi86WBZW4HMymUw8cNUDALz3y3uk5aR5eERyqXH2k6oY7JNnZc4a4X40jPIHHKvx5djKRoahiEhhKSglIiIi4gZJp0v3Qn3N+JpNF9i65BxJO8Kfx/4EoE3VNm47b0FMJlOZ7CsF0LV6V+L84kjOTubD3z709HDkEuNcea9SsG++1zpUDibU10xSjp2VB9PdPTQRkVKloJSIiIiIGyR5qHRv5e6VADQq34hyQeXceu6CBFrK1gp8TmaTmetjrgfg9Z9eLzM9scT7GYbBgQxHppSzn9SZAixmup8u49t0PIvdqTluHZ+ISGlSUEpERETEDU6dXnkvws2le86glKf7STmV1UwpgPZR7QnxC+Gv43+xYtcKTw9HLhGnsu1k5hpYTFA+MH9QCiAxzI8m0QEALNybRnYZKn8VETkfBaVERERE3CDJQyvveUuTc6czV+Ara4ItwQxpOARwZEuJlARnP6kKQT74nKe097pKQYT7mUnJsfPdAa3GJyKXBgWlRERERNwgyZUp5b7brxMZJ9h6YisA11S5xm3nPR/nCnxlMVMK4J7m9wDw9T9fs+PkDg+PRi4Fzn5SFQvoJ3Um/zPK+H45kcXOFJXxiUjZp6CUiIiIiBs4e0pFujFTav2B9QDUjKpJdFC02857PmU5UwqgdnRtutboioHBGxve8PRw5BLgzJQqqJ/U2aqG+tEsxlHGt2hvGlllrDebiMjZFJQSERERKWW5doNU6+lG527sKfXTgZ8AuLLylW4754WU5Z5STmNajgFg+ubppOWkeXg0UpZl2ewcy3JkURa08l5B2lYIJtLfTKrVzld7Usm1l93PkoiIglIiIiIipcxZuudnNrkyhdzBFZSq5EVBqTK6+t6ZutToQs2omqRkpzDr11meHo6UYYdOl+6F+5kJ8S3cr2Z+FhM9qobiY4IdKVbm70pRYEpEyiwFpURERERKmbN0L8LfjMnknqCUYRiu8j2vCkpdAplSZpOZ0S1HAzBl/RQMo+xei3iWs59U5UJmSTlVCvalb7UwV2Dq850KTIlI2aSglIiIiEgpc62858bSve0nt3My8yT+Fn8axzV223kvpKz3lHIa2mQoIX4h/H38b5bvXO7p4UgZ5ewnVbEQ/aTOlhDmR9/qjsDUzlQr83amYFVgSkTKGAWlREREREqZs3zPnU3OnaV7V1S4Aj+Ln9vOeyFlffU9pzD/MG5rchsAr69/3cOjkbLIMAwOns6UKmw/qbMlhP
rRv3o4vmbYpcCUiJRBCkqJiIiIlLKjmaczpfzdd+v1037v6ycF/2ZK5dgNbGX8l+dRLUcB8M0/37D95HYPj0bKmuNZNrLtBr5miA0sfsC6SqivKzC1O9XKpztSyLGV7c+WiFw+FJQSERERKUV/n8pmb5oVExAfUrxsiOLwxibnAAEWE86uWpll/BfnWtG16FajGwYGb6x/w9PDkTLG2U+qQpAv5ovsNRcf4suA6uH4mU3sTbPy6c5kBaZEpExQUEpERESklKRb7SzZnwZAq/KBlAsoet+Y4sjKzeKXw78AcGVl7wpKmUwmAi6BFficxlw5BoD3fnmP1OxUD49GyhJnP6nKxegnVZDKIb4MqBGGn9nEvrRc5u5IJruM924TkUufglIiIiIipcAwDJbsSyMz1yAmwELruCC3nfuXw79gtVspF1SOxIhEt523sC6FFficOlfvTK3oWqRkpzDr11meHo6UIc5MqYrF7CdVkErBvtxcIwx/s4n96bl8uiNFgSkR8WoKSomIiIiUgj9PZfNPcg5moEfVUCzmiyvPKYoz+0mZLrIsqDRcKivwAZhNZka3HA3AlPVTsBtl/5qk9GXk2jl5elXOSiWUKeVU0RmYsjgCU3N3pJB1CXzWROTSpKCUiIiISAlLtdpYtj8dgNYVgigf5J6yPSdv7SfldKmswOc0tPFQQv1C2XpiK8t2LPP0cKQMcK66F+1vcWUOlqQKwb4MrBFOgMXEgfRcFuxSaamIeCcFpURERERKkGEYLN6bRpbNIC7Qh6vKB7p9DK6glJf1k3JyZUpdAj2lAEL9Qxl+xXAAXl//uodHI2WBs59UxRLOkjpTXJAPN9cIx4RjVb7kHFupnUtEpLg8GpSaNGkSLVq0IDQ0lNjYWHr37s3WrVsvuN+nn35KnTp1CAgIoGHDhixcuNANoxURERG5sN9PZrMjxYrFBNdXDcHi5vK5Y+nH2HlqJwAtK7V067kLy9VT6hJaHWxUy1GYMLFw20K2ndjm6eGIl3P2k6pcgv2kChIX5OMqD9yZklOq5xIRKQ6PBqW+//57Ro4cybp161i2bBlWq5XOnTuTnp5+zn3WrFnDwIEDuf3229m8eTO9e/emd+/e/PHHH24cuYiIiEh+KTk2Vpwu27u2QhAxge4t2wPYcHADALWjaxMREOH28xdG4CW0+p5TjagadK/ZHYCp66d6eDTizeyGwaGM0s+Ucqoe5gfA9mQFpUTE+3g0KLV48WKGDRtG/fr1ady4MTNnzmTv3r1s3LjxnPu89tprdO3alYceeoi6devy9NNP07RpU6ZO1T/+IiIi4jmGYbBobxrZdoOKQT60jHV/2R6c0eTcS0v34NJafe9MY64cA8CMX2aQkp3i4dGItzqaacNqB3+LiXIBllI/X7XTQak9qVZy7ZfWZ05Eyj73//nuPJKTkwGIioo65zZr167lgQceyPNcly5dWLBgQYHbZ2dnk52d7XqckuK4QbBarVit1oscsWc5x1/Wr0M8Q/NHyirNXSmu0p47v57MZleqFR8TdKkUgC03F090cFm3fx0AzeOae+3nxA9HhlS61ea1YzxbYeZPu/h21I6uzdYTW5m+cTqjWoxy1/DEy505f/amOOZ/hUALubm5pX7uSB+DEB8TabkGu5IySQgt3ZLB0rT5RDZ/JVnpXTWIoFJoEC/56b5Liquwc8ZkGIZXhMvtdju9evUiKSmJVatWnXM7Pz8/3n//fQYOHOh67s0332TChAkcOXIk3/bjx49nwoQJ+Z6fM2cOQUFBJTN4ERERuazlWPzZWeEKDLOF8qd2EpV6yCPjMAyDW/+4lTRbGi/VeokaQTU8Mo4LSfcPY2/5hvhZM6l+aJOnh1OiFh1fxDv73yHGN4a36r2Fj8mr/gYsXuBAdC1SgmMol7SXmJR9pXaewKNHid28mZhffyXXbiElpiL24AB8/AxywsPJDgvL89/cgABwcw+8otpeoSlW30DKn9xBVNphTw9HRM
4jIyODQYMGkZycTFhY2Dm385p/JUeOHMkff/xx3oBUcYwbNy5PZlVKSgrx8fF07tz5vG9MWWC1Wlm2bBmdOnXC17fs/sVDPEPzR8oqzV0prtKaO4ZhMHdXOkaGjUpBFgbUb4LJdEWJHb8o/jnxD2m/phHgE8A9N96Dr8U7PyPHs2y8vz0Nc0AQ3bt39/RwCqWw8+c663XMf2M+RzOOklIlhVsa3uLGUYq3OnP+zNyZCVaDdk3qUjWkYcmdJDMT0w8/YFq2DPOSJZgKsYDUmQx/f6hUCaNOHcdXvXpQty5GnToQGlpy4yymXLvB3386ql6iEuvQPb6ph0d0edB9lxSXs0rtQrwiKDVq1Ci+/vprfvjhBypXrnzebePi4vJlRB05coS4uLgCt/f398ff3z/f876+vpfMh+pSuhZxP80fKas0d6W4Snru/Hwsk/0ZNnzN0CMhDD+/0u8Rcy6bjjiyjppWaEpQgPdmhIfieI+ybQY+Pj6YvDw740wXmj++vr6MvWosj337GC+te4mhVwzFbFKZkThkYyHF6ihUiQ/zx9dyEXPDMODvv2HxYliyBL7/HrKy/n3dbIZWraBLF6xxcaz5fS+Bp07Q2J6K/8njcPQoHDvm+MrKwpSdDTt3Ytq5E85e3bxKFahX79+vhg2heXPHOdwkOTMXZ4nPwUyb7gHcTPddUlSFnS8eDUoZhsHo0aOZP38+K1euJDEx8YL7tGrVihUrVjB27FjXc8uWLaNVq1alOFIRERGR/E5m2Vh5wLHa3nUVg4n091xACuCnA6ebnFfy3ibn8O/qewaQZTMI9Ck7QanCuLfFvTy3+jn+PPYnX239ihvq3ODpIYmXOJTh6DQXE2DBv7gBKbsd3n8fnnoKdu/O+1rlytC1q+OrQweIiADAFzi4LZk9aVZMlYJpcXohhkxrJhNWjuetlS8RlW6nahLUPQ71jjm+Gp6wEJtig717HV+LF/97rvh4GDoUhg2D6tWLdy1FcCL73y59yTl20qx2QnwV8BUp6zwalBo5ciRz5szhiy++IDQ0lMOHHXXB4eHhBAY6flAOGTKESpUqMWnSJADuu+8+2rZty+TJk7n++uv5+OOP+fnnn5k2bZrHrkNEREQuT4v2pZJrQNUQX64oF+Dp4ZSZoJTFbMLPbCLHbpCZaxBYyDvSA+lWvjuQTrUwP5rFBBT/l/rT9qVZWXckA1+zie5VQvGzlExwLDwgnHubOwJTk1ZNolftXmUqG0wKJyPXzsK9acQGWLi2QlChvscHMhyNzSuHFDPj5OefYdQo+MnxWcffH9q2hS5dHIGounXP2Reqergfe9Ks7EzJoUVsIMt3Lufur+9mx6kd4Aedm/TlvivvY82+NXy+fREP7F1Frj2XqAyoewwan/ChQ1ZFmp4KIH7rISz79sEzzzi+2rSB226Dvn0hJKR413YBJ7LyLh1xIN1K7Yj8FTFFkWMz+GpPKlVDfWke45kVU0Uudx4NSr311lsAtGvXLs/zM2bMYNiwYQDs3bsX8xlpoVdffTVz5szh8ccf57HHHqNmzZosWLCABg0auGvYIiIiImTk2tmX5vgFs1uVEI8HHbJys/j18K8AXFnZu4NSAIE+JnJyDDJtdqBwGWZrj2SyPz2X/em5rD+aSYvYQJrFBBBQhOCUYRjsTbOy+nAme9P+XRkozZpMv+phFx3ochp71Vhe/elVfjrwEyt3r+S6xOtK5LjiPZbuS2N7cg7bkyHNai/Uz4FDmY7ASsWgIv4advw4PPYYvPuuo2wvJASefBLuvRcKuXhT9TBfvj0Af504zK2/jeHD32cBUDmsMm90f4NetXsBcE2Va3i49cOkZqfy7a5vWbx9MYu2L2J18h7eZC8A/p1h1KHK3PNHINU2bMf0ww/www+OgFn//o4A1TXXlGjjdGdQyoQjy/JAeu5FB6W2JWezLTmHvWlWmpUL8PjPcZHLkcfL9y5k5cqV+Z7r168f/fr1K4URiYiIiBRO0u
lSkhBfMxEeLtsD2HxoM1a7ldjgWKqGV/X0cC4o0GImGTuZuYVbCNpuGOw7HUQK9TWTarXz46EM1h/NpHlMAC1iAgk4zxLxhmGwJ9XKqsMZ7E93BBPNJqgX6c+25Bz2p+cyd0cK/aqHFSnIdS7lQ8ozvMlw3vz5TSatmqSg1CXmr1PZ/J2UgzOE8dvJbOxA9yohmM8R2LBj4sjpoFShM6VsNnjnHXj8cTh1yvHcLbfA889DxYpFGnOkn5m/D33GZ78+TnrOCUyYGNliJBM7TCTMP/8CUKH+odxQ5wZuqHMDhmGw9cRWFm9fzOLti/lu93dMrrKfyVWgUmt4Yk8VBqzPJGL/MZgxw/FVvbqjtG/ECIiNLdJYC3Ly9M/cGuF+bEvO4WB64ZabPx9nYDrbZpCUY/d4CbbI5UhFuCIiIiLFkJRtByDCzztup84s3SsLf+139pHKzLUXavujmTaybQb+ZhN314ukV0Io5QIsZNsMVh/O5K0tp/jhYHq+4xmGwc6UHD7clszHO1LYn56LxQRNywVwV71IelQNZWCNcAIsJg6k5zJ3ewpZtsKN6UIeav0QFpOFZTuXsfHgxhI5pnheutXO0n1pALSKC6RXQigm4I+T2XyzJw37Of7wnuUXgs1wzP1C/dxYtcrRTHzkSEdAqnFjRzbSBx8UOSC189ROus3pxvsb7iE95wQJkfVYc/sapnSfUmBA6mwmk4k65eow9qqxLL5lMUcePMJ7vd6jQ2IHDoabuLvRXiJvP0bb4WaWtK2MNSgAduyAJ55wlBR++KEjw6uYDMNwZUo1jHJkRx3KyMVmL/4xAfak/hvYOny6tFJE3Ms77qJEREREypikHMcvSN6QJQVlp5+UU+DprKZMW+F+qdyTmgNA5RAfLGYT9SL9ub1OBL0TQokJsJBtN1hzxBGc+v5gOhlWOzuSc/jgn2Tm7kjhwOlgVLOYAO6uF0nn+BDCT6+UGBfkw82nA1MHM3L5ZHsKWYUMlp1PQkQCAxsOBGDSqkkXfTzxPMMwWLwvjUybQWyghdblg6gb6c8NCaGYgS2nsvn6HIGpTP9QACoF+54/cHzoENx6K1x7Lfzyi6NZ+RtvOPpJXXttkcf82rrXaPBmA5buWIqfxZ/OdR7j/nYrLupnRURABLddcRvLhyznwAMHeKXLKzSv1Jwfqtjpet1+Iu/L4s6bfNlTNRxOnnRcT69ecOBAsc6Xlmsnx25gAqqF+RHoY8JmwJHM4geSUnJsJOX8+zlXUErEMxSUEhERESmGU6dLSSL8vCQotf90UKoM9JOComdKOctsqob6uZ4zmUzUifRneJ0IbkwMJTbQQo7dYO2RTKb8cZJPd6ZwMCMXHxM0jwngnvpRdKocQmgB37O4IB8G1ggn0GLiUEYuH21PLvTYnGx2g1+PZ/H5zhTXL7iPtn4UgM//+py/j/9dpOOJ99lyytGDyGyCHlVDsZgd87hOpD83JDoCU3+eyuar3an5AlPOoFTl4HN0UDEMePNNqFXLkVlkMsGdd8I//zh6R/kUvfPK7N9mM3bJWDJzM7ku4To23/UbnWrfT4bNh+NnNQ4vrgqhFRh71Vg23LmBraO28mTbJ6lYoSbvNrJSY0gyT7Q3ketrga+/hvr14b33ipw15cySivA342M2USnIUf7oLMUtjjN7yoEj80pE3E9BKREREZFicGZKRfp7/nbqWPoxdiXtwoSJFhVbeHo4hRJ4um9TYXpKOfpJOX5hrFJALx6TyUTtCH9uqx3BTYmhlA+0YAC+ZmgZG8jd9aPoWDnkgsvHlw/yYWDNcIJ8HL1/ChuYyrUbbD6eyTt/nWLRvjT+Sc5h7ZEMAOrH1qdX7V4YGLyw+oULHku8V2qOjWX70wG4Ji6I2LOWjawd4U/vxFDMJvgrKYcvdqdiOx18MQyDTD9HUKpicAH9pI4cgR49HKV6aWlw5ZWwfj1MmwYxMcUa7+6k3dy78F7AERxdMWQF9WJqUSXUcf4dKTnFOu
751Iquxfh249k6aisb7tzATY3680wbg8Z32vgzIRiSk+H22x0rBe7ZU+jjnjwdlIr2d7znlU4H9g5cRF8pZ+letdPvx5GM3EL1PBaRkuX5uygRERGRMsjVU8oLyvecpXt1ytUhPCDcw6MpHFemVCH6Nx3OyCXHbuBvMREbeO7322QyUSvCn2G1I7i9TgT31o+ifaXgCwajzhQb6MiYCvIxcTTTxpxtyWRYCx5jrt1g07FMpv15iiX70knJseN3OnPmzFKgcdeMA+CD3z5gX/K+Qo9FvIdhGCzal0a2zSAuyIerygcWuF2tCH9uTAzFYoKtSTl8sSsVm90g1WqQ6+OPGahw9sp7X38NDRvCwoXg7w+vvQZr1jj6SRWTzW7j1vm3kpKdwtXxV/N0+6ddJYPVwxzZhqURlHIymUw0r9icT/p+woc3fsj++DAa3ZrO4139sPn5wNKl0KABvPUW2C/8M+DE6czU6ADH57/S6eD0gfTiB5KcmVJNYwLxMUG23eBUdsn0kxORwlNQSkRERKSIcu0GqVZno3MvCEqVsdI9KFqmlPOXxyohvudc2exMJpOJmEAfV9+qoooJ9GFQzXCCfUwcy3JkTGWckTFltRv8fDSTt/88xdL96aRY7YT4mulYOZgR9SIBSM6xu7Ksrqp8Fe0S2pFrz2Xy2snFGpN41m8ns9mZYsVigh7nWWEPoGa4PzclhmExwT/JOSzYncq+02VmMYEWfE8HLsnIcJTl9ewJx45Bo0aOvlFjxoD54n5Ne27Vc6zau4pQv1A+uPEDfMz/BsKcQan9abkl1tT/fAY3Gsxvd//G1YnXMvGqHOqNyOXv2tGOjLB774UOHRxN0c/DmSkVdTooVSHIBzOQZrWTco6g8fkkZdtIzrFjxvFzxZn1dijj4lf0E5GiUVBKREREpIiST5fu+ZlNBPl4fqW7stbkHIrWU2pv6r9BKXcpF+AITIX4mDmWZWPurnRyLP5sPJ7N21tOsvxAOmlWO6G+ZjpXDubuepE0jwkkxNfsKuksKFvqf5v+x/GM4267Drl4yTk2Vpwu22tTIYhygRfu7VQ93I8+1RyBqW3JOSw/mAlAxaDTQexNm6BZM0emEMADD8BPPzmyhy7S+gPreXLlkwBM7T6VapHV8rwe4W8h2t9R4ro7xT1BmKoRVflu6HdM6jCJnbE+1B9wgid6h2ELDICVKx0BuRdfhPT0Avc/4Srfc7x/vmYTsaczzg4Wo6+UM9BdIdgHP4uJuNPHUrNzEfdTUEpERESkiJwlHuF+5vOvouUGdsPO+gPrgbIWlCpcppTNMNiX7mxy7r6gFEC0MzDla+ZEtp0dlZqz8nAW6bkGYb5musQHc1e9SEf5j/nfeVDhdBPmMxsnd6rWiaYVmpJhzeD1n15363VI8RmGwaK9aeTYDSoF+9AituCyvYJUC/Ojb7UwfEzgnOYV/YHnn4erroK//4YKFRylbJMnQ0DARY83LSeNwZ8PxmbYGFB/ALc2uvUcYyu9vlLnYjFbePSaR1l3+zpqxtTmmSYp1Lozi+2NKjuyxh5+GBIS4NlnISnJtV+OzXBlQznL9+DfvlL7i9FX6szsS+DfoNRFrOYnIsWjoJSIiIhIEf3b5NzzpXv/nPiH5OxkAn0CaRB78VkW7nJmT6nz9YQ5nJGL1Q6BFhMxAe5/v6MCLAyuGU7I6fGG+5roFh/CXfUiuaJc3mCUU0FZFyaTyZUtNWX9FFKzU90werlYm49nsTvVio8Jrq8SWqjy0TMlhvnRt7ojMBV2cC91+vWARx8FqxVuugl+/x06dSqx8Y5dPJbtJ7cTHxbPW9e/dc6gefVwRwnfzpQctzf3blaxGZvu2sQ9ze9hZxTU6r2f/95SieyqleH4cfjPf6BqVXjsMTh6lJOn+0kF+pjylORWCv63r1RRGIbhyr6sejoo5ezzdSTDlm/VRBEpXQpKiYiIiBRRUrZzeXLPB6Wc/aSaVmiKr8W9mUQXw9lTymbA+V
rCOFfIig/x9VhWWqS/hVtrhBB/dAu31QqlcbkALAUEo5zOVQp0Y50bqRVdi6SsJN7Z+E6pjlkuXlK2je8OOsrJ2lYMdvUzKqqEEF9GbPiSOwa0wffHHyE4GKZPh88+g+joEhvv/L/mM33zdEyYmHXjLCIDI8+5beVgX/zMJtJzDY5k2kpsDIUV5BvEm9e/yVcDv6JcSAxP1zhA8JD9PH93Q1JqVoGUFJg0CRIS8L1/LKGHD7hK95ycmVJHM3Kx2gsfSErKcfShMpv+bZgeHWDB1ww5dsMVBBMR91BQSkRERKSIXCvv+Xn+Vqos9pMC8DWD5XRc53wr8DnLbNxdune2IB8zIVlJWAoRGCt/eoXAFKud9DMibhazhUdaPwLAy2tfJjs3u3QGKxfNMAy+2ZuK1Q7xIT40jylmad2aNdCqFWEjbscvPR17ixaweTMMHw4lGGQ9mHqQO766A4CHWz9Mu4R2593ex2xyfabcWcJ3th61evD7Pb/Tr14/7BYTj8b9TsTAvdx1R3kO1Y2HzEyip73B3b1a0PaJMfDPP659w3zNhPiasZO3VPZCnFlSFYN8XE3nzSYT5QPVV0rEEzx/JyUiIiJSxjjL97wiU+pA2Vt5DxzlbBdagc9mN9if5v4m5xfL32J2ZXWc/QvuLY1uoXJYZQ6lHeL9X9/3xPCkEH4+lsW+tFx8zY6yvSJn6e3YAf36QevW8NNPGMHB/DV4MLaVK6FmzRIdq92wM3TBUE5mnqRphaY8dd1ThdrPuQrfTg8GpQDKh5Rnbr+5bB+znQdbPUhEUCTTKh+hYv99dL/Nj83147DkWomf+wHUqQMDBsDff2MymVzZUgeL0Fdqj/NnylmBbmeGY1ECXCJy8RSUEhERESkCwzBc5Xue7imVac3ktyO/AWUvUwouvALfwYxccg0I8jFRzgP9pC7GuRon+1n8+L9W/wfAC6tfINeuX4C9zcksG9+fLtu7rmJw0YLPp07Bgw9C3bqO8jyzGe64g9w//+Sffv3At+SDq6+te43lO5cT6BPI7Jtm42fxK9R+zmbnB9JzySjEKpilrVpkNV7s/CL7H9jP/3r+j0ZxjVhUNYem/Q5z1e3wfeMoMAyYOxfq14fhw0k8eQiA/YXsK2UYxr/ZlyEFB6WUKSXiXhdez1REREREXNJy7eQaYALCPFy+t+nQJnLtuZQPLk+V8CoeHUtxOJoW286ZKXXmClmeXuWwqOKCfNhyKrvArIs7m97JMz88w45TO3hpzUvUjq7NycyTnMw8yYnME67/P/OrWmQ1pveaTvWo6h64msvLbyeyyDUc8+6KcoUs27Na4a23YMIEOHnS8VynTvDSS9CokeP1zZtLfKy/Hv6VR1c8CsDLXV6mTrk6hd43zM9CTICFY1k2dqXkUD/q4lf/KwlBvkHc0fQObr/idn7c8yNjlr3MBtPXtIs/ScNW8MKqQLr+kQkzZtB49mxy+wxl04gHMBIvnNF2KttOmtWOxfRvo3QnZ7Pzo5m52A2jyE3tRaR4FJQSERERKQJnP6kwP3Oh+guVpjNL98pa0AbyrsBXEGeT87JUuudU4TxZF8F+wYy5cgxPrnyScSvGFep4+1L2ceW7VzJ/wHyurXptiY5V8jpyOrutbqTfhT9XhgFffgkPPQTbtjmeq1fPEYzq2rVE+0adLdOayeDPB5Njy6FnrZ7c1eyuIh+jergfx7Iy2Zli9ZqglJPJZKJRhdbc3LwePbIOYcqcy7RN0+gWd5TWV/vw2S+1iFv/J80/+h+NFswm676xBD76MISHn/OYe9IcpYqVgn3zrZwZ5W/Bz2wix25wIstGTKB+VRZxB33SRERERIrAtfKen+fLycpqk3On8/WUyrUbrj4xnm5yXhyxgT6YgDSrIzMjxDdvVt2YK8ewYtcKjqQdISowiqjAKKKDookKiHI9dn4F+QbxwNIH+Pngz3SY1YF3e73LkMZDPHNhl4Gjp4NSsRcKSqxbB48+Ct9/73gcGw
tPPQW33w4+pf9r1iPLH2HLsS2UDy7P9F7TixWYrh7mx7ojmexMyfHK7CDnSngJEZW5o9XTPHrNo9wy/xYWsIAKFf9kZr9buH7Wn5T7fRM89yxMe9vxPRk1CgID8x1v73kC3SaTifJBFval5XIoI1dBKRE30SdNREREpAhOuZqce74150/7y3hQ6jyZUgfTHf2kgn1MRHlBQ/mi8rM4+mAdy7JxOCOXGuF5+/xEBETw/bDvC32874d9z5D5Q5j31zyGLhjK1uNbebr905hNnp+Hl5J0q53000HSmIBz/Kr0xx/wn/84MqQAAgLg/vsdwZCwMLeMc+mOpUxZPwWAmb1nEhMcU6zjVAr2wd9iItNmcCgjN19Jm6edyHL8vI0+3VMu2C+Yef3nMW75OF5Y8wLD0j/kugf7cM/hu+j2v5cJ2b4VHn4YXn0VnnjCESA83cfrzH5SZzc5d4oL9GFfWi6HM3JpFF361ycianQuIiIiUiTJp8v3PN3k/EjaEfYk78GEiRaVWnh0LMXl6ClVcKaUs8ymLPaTcirvWs2r8CuDnUuQbxBz+83lsWseA+DZVc8y4LMBZFgzLvrY8i9nllSkvxk/y1nzbudOuPVWR4+oL790NDEfPhy2boVnn3VbQCotJ40RX40AYFSLUXSt0bXYxzKbTCSeDtDsSPbsKnwFcQWlzvh5azaZeb7T87zb8118zD58t2se/xfwP/730ecwcyZUrQoHD8I99zgazs+ZA3Y7J7JspOca+JigYlDBAccKQY73Qs3ORdxHQSkRERGRIjjlJeV7ztK9ujF1CfN3zy/DJS3Qcu7V91wrZIUWbiUxb3S+vlLFYTaZmdhhIjNvmImv2ZfP/vyMdjPbcSj1UIkcX/4NSpU/s3Tr0CEYORLq1IEPP3T0kerbF7ZsgenToYp7Fxl4/NvH2ZO8h6rhVZnUcdJFH696mOMztiPFC4NS2Y7vR1QBq2/e3vR2lt26jMiASPad+pmnvu3Kps5XOIKEr7/uKKfcsQMGD4YrriDp8y/AMArsJ+XkXIHvSGYuNqPgBRhEpGQpKCUiIiJSBEmu8j0PB6XKeOkenDtTymo3OHh6ifey2OTc6cwl5o0S/AV3aJOhLB+ynKjAKDYc3MCV717Jr4d/LbHjX86OZjo+37GBPnDqFIwbB9Wrw5tvOlbQ69IFfv4ZPv3UEaRys3X71/H6T68DMK3nNEL8Qi76mNVOB6WOZNpIsxa86ICnnDyrfO9s7RLase6OdcSGVCcpcx9tZ17Dor3fwujRjoDUM884Gp//9hs1bunLLcOvp+Hv6855vkh/M/4WEzYDjp+eCyJSuhSUEhERESmkbJudjNMBlAg/z95GlfUm53DunlIH0q3YDAj1NRPpBb27isvZ7Dw91yC1hH/Zb1O1DT/d8RO1o2uzL2Ufrd9rzVdbvyrRc1yOjmTm4p+aQu23XoZq1eC55yAzE1q1gpUrYfFiaNbMI2PLseVwx5d3YGAwpPEQOlfvXCLHDfY1u7L6dnpRtlRW7r/9vc7XV65WdC1e7rGCxOirSctJpcdHPZjy0xQICXH0/tq5E+Phh7EGBFL51w006NPVsTLipk35jmUymYg7nSV3OFMlfCLuUHb/lRcRERFxs6TT/aQCLCYCfDx3G2U37Gw4uAGAKyuX4aDUOVbfO3OFrLLaTwrA12wiJtDxy3Rp9KipEVWDtbevpUNiB9Kt6dzw8Q28svaVEs3Kupzk/vkXV4x/kHu7NSL6qScgKQkaNnT0j1q9mv9n777DoyrTh49/z/RMeoFUQknovYOIIlJVFBUs2Huv6/5W3H1d3V37rq69rwWxYZcm2AsgvUsJAUIa6ZmU6XPeP85M6JAyySTh/lzXuUKGmTnPTM5M5ty5C6efHtL1PfbLY2wp3kIHaweenvR0UO+7W5S/r1QrCkoFJu9FGnWY9cd/v+0V15FrR89jXMbl+FQfdy6+k9sX3k5+VT5qbCzFDz3Cq1+uZP3Ma1ANBv
jmGy24eNFFWrnfQZKCXHYrhDg+CUoJIYQQQtRToHQv1E3Ot5Vsw+a0YTVa6dexX0jX0hSBTCmXT8XrOxBIOdGErLakuU9wY8NiWXTZIm4cciMqKvcuuZcz3z2TdQXrmmV/7Y7PB/Pnw+TJGPr2YejH/8NcW4Papw/MnQvr18O0aRDi4OiWoi088ssjADw/9XnircEdDRfoK7XH5g5aLyWvT2VDiaOuBK+hAk3O6zN9U+sTZWJa/2d4/MzHUVB4cdWLpD6dSsRjEZz65hBe2XM/D86M4qNPHqZw+gRURdHKMPv2hauvrgtOJdUNKJCglBAtQYJSQgghhBD1VFHX5Dy0H6FW5Go9UYalDMOgO8bY+jbAolcInOrbvdqJsMurku8/GezchvtJBdSVAjXjCa5Rb+SVc17hmcnPYNab+WHPDwx9bSjXfnmtNEE/lspK+O9/oUcPLei0ZAmqorBj3FS+e+dLlM2bYdYsbcJeiHl9Xq7/+nrcPjfTekzjor4XBX0fyVYDVoOC06fyzb7qJmfbeXwqn+22sWhfNfP3VjXqPkqdx+8ndbCOYXqMOnD54Nqhf+Kziz+jV0IvdIqOWnctO0s3saVgAV9vfZZL1/+V5EHfMuBmla976cDrhXfeQe3dG/WimaTt2ARoTe89Psk6FKK5hf5dVgghhBCijahwaeV7oW5yvnzfcgBGp40O6TqaSlEULIdN4MurceNTIcqoIzrEwb9gCPTqKbAHt9n54RRF4e5Rd7P99u1c2u9SVFTeWv8W3Z/vzj9/+ie17tpm23eb8scfcOutkJoK99yjNcOOiYH77uO3Xzfy2dPvoo4fH/LMqIO9uOpFVuSuINIUyUtnv9QsJa2KojApLQIF2FjqZGFONb5GHq8en8rnu23ssmkZj/m1Hmoa0VOtLlOqHkEpnaKQbNWC2Pk1Hqb3ms4ft/2B/a92tt+2nRtHf8i0fo9y3ZDbOav7WfSI78G2ZAPnXuJjxPXwRU9QVBVl3idEjhrOebecQ/Ka5ZQ0MstLCFF/bf83vRBCCCFEC6nLlAp1UCq3fQSl4MgJfAeX7rXlflIBHcIM6BTt8dlaYLJZ55jOvH/h+yy/bjmj0kZR467hwR8fpOcLPXlv43v41NY1Xa1F7NunZUWNGQN9+sDLL0NNjVa29eqrkJsLTz1FTodOgH/yXiuxt2IvD3z3AABPTnyStKi0ZttXr1gz53aJRAE2lTlZsLfhgSm3T+XTbC0gZVC0flDQuAbqdZP36vl+mxqu/dxya9x1l5n0JqKt3eja4UzGZd7Ia+c8x4JZC9h++3bsf7WTfWc2f//zfL5+4lpG3xXBe/3Bq0Dv33/niuvPpXpkBoXz3gbp0yZEs5GglBBCCCFEPQV6SoWyfK/SUcnW4q0AjEobFbJ1BEugr1StfwLf3oOanLcHBp1CB3+mR0v2qBmVNopl1y7jgws/ID06nVxbLld8fgWj3hjFrzm/ttg6QmbPHvjPf2DUKEhP17Kili3TyvGmT4fvvoNNm+DGGyE8HFVVKfJPW2stQSlVVblp/k3UuGsYmz6WG4fe2Oz77B1r5jx/YGpLecMCU4GA1O4qN0YdXJQRTf84M9DwoJRXVSlvQPkeaH2lAPJqDn2d7fUHutMiDOgOCnQbdAa6xnbl7B5n8+Z5b/LDv4uxfvQpd/53Mq8P0+HUQ+bmHJIuuoZtXSJZ9Pj1FFftb9DjEEKcWJM+UblcLrZv347HI03ghBBCCNG++VSVSmfoy/dW5q1ERaVrTFcSIxJDto5gCZTvOTwqTq+vLnDTuR00OQ8IlBW19DQvRVG4pN8lbLttG4+Of5QIUwSr8lcx9q2xXDTvIvZU7GnR9TS7XbvgiSdg+HDo2hXuuw9+/10rxRs7Fp59FnJy4PPP4bASvSq3D4dXRQck1DMI0tze2/ge3+z6BrPezOvTXkentEwwvFesmeldI9GhBaa+3lN1wsCU26fyyS4bew4KSKVHGsmI1hqoZ1e5G5
R1VeH04gOMugPZVicSyJQqc3rryoHhwDTPE/WosxgsXND7Al68czGD5xdwx2sP8/aZadQYoVdODVNnv4knNZnCUwfBvffCG2/A8uXalEYhRKM16p2ttraW6667DqvVSt++fcnJyQHgjjvu4PHHHw/qAoUQQgghWgOby4cP0Cv1P0lqDnWle53afukegLWufM9HbrUHFYg26Yg2tY7AQDCEesR8mDGM2WNnk3VHFjcMuQGdomPe1nn0fakvTy9/Go+vjf6B2eGAn36Chx+GIUMgMxPuvx9Wr9Yyos44A158EfLy4Oef4c47tV5SR7HfnyUVb9Fj0IW+bLSopoi7v7kbgAdPf5CeCT1bdP89Y/yBKQX+qHDx1Z6qY07lc3lV5u2ysbfajUmncHFGNJ38AaBkqwGLXsHpVY/IYDqegyfv1beMN8ygq5vUF9iXT1XZV9PwaZ7dY+PpNvhWip5aR8W2Tay4ZhK2MD3JVSpJv22AZ56BG26AU06B2FjtuJo4Ee66SysJXb4c3O4T70gI0big1OzZs9mwYQM//vgjFoul7vIJEybw0UcfBW1xQgghhBCtRaB0L9qkP6QEpKW1p35ScFBPKa9a10+qPUzdO9jBQanmbHZ+IokRibw27TXW3bSOseljqXXX8qclf2LUG6NYV7AuZOuqt9parezuwQfh9NO1BuXjxsFDD8G6daDXw4QJ8MorkJ8P33+vNTVPTj7hXRfZtdd3ayndu2vxXZTZyxiYOJA/n/LnkKyhR4yZ87tGoldg2zECUy6vyse7KskJBKQyo0g76PWrUxS6RfmzpRpQwldWV7rXsJ9HIFsq3x+IKrJ7cXpVzHqFxAb8bKOMOsIMCj5ASezJqP99g3V/KS89cxk3TINnRsHKvjF4U/3HVn4+fPstPPcc3HyzFqzq2BEuuQTeew9KSxv0OIQ4mTTqXfeLL77go48+YtSoUYdErvv27cuuXbuCtjghhBBCiNaioq50L3RZUj7Vx++5vwPto58UQNhB0/cCk64aktHQFnSw6NEr4PCqVLp8IW+UPyBxAD9e/SNvrn2TPy/9M2sK1jD89eHcO/peHhr3EFajNaTrq1NdrfWB+uknbVu58sjsk6QkLUA1YYLWKyohoVG7OtBPKvQZevN3zOfDzR+iU3S8ce4bGPWhez10jzZzQVeFz3bb2F7h4ovdVUzvEolep+D0+pi3y0ZujQezPyCVEn7kWjOijGwtd7LL5uL0lPB67TeQKVXfflIBqeFGNpU5yfVnSu2t0gJhncKNDfpjgqIoJIcZyK5yU1jrISXciCEymlvvfo/PJ1/IVV9cRZWrgqQIC59MXsiYmjjYulXbtmyBVaugpAQ++kjbdDoYPRrOOUfb+vZtVRMehQilRgWliouL6dix4xGX19TUtIspKUIIIYQQh6ubvBfCsrIdpTsod5QTZghjYOLAkK0jmAKZUhUub115W3tpch6g1yl0DDNQUOuhoNYT8qAUgE7RccPQGzinxznctfgu5m2dx1PLnuKTrZ/wyjmvMCljUvMvwueDggLIzta23bsP/Ds7W/u/w6WmakGoceO0r927B+XkvrU0Obc5bdyy4BYA7h11L8NShoV0PQAZ0SYu7BbFp9k2dla6+HxPFWd1iuDT3TbyajyY9QqXZESRfJSAFEBXf6ZUkd1LlctLZD3eQwOZUnENfK0EMqUKarUeVgdP82yoJOuBoNTBzu99Pn079uXCjy9kc9FmTv9sGk9MeIJ7r773wLmw16sFUefP17aNG+G337Rt9mzo3FkLTp19Npx2GoTXL1gnRHvUqHfdYcOGsWDBAu644w6AuhffG2+8wejR7SOVXAghhBDiYOWByXshDCgs36eV7g1LGRbS7Ilgsvin7wX6ScWadUS1o35SAUlWLShVWOuhd6y5QbetcGolSInW4AdMkiOT+Xjmx3y9/WtuXXgruyt2M/m9yVw+4HKenvQ0HcI7NG0HPh/s2wc7dhzYdu7Ugk579o
DTefzbp6cfGoTq1i3oGSYur0q5PxMy1EGpB757gFxbLhmxGTx8xsMhXcvBukWZmOEPTGVVuni5qgy3TxtUcElmdF2J6tFYDTpSrAbyaz1k29wMTDj+61tV1UZnSiVY9Jj9Paz213rYV934QHfgMR1tamaP+B6suG4FN82/ibmb5nLf0vtYnruc/533P6LMUVop6ejR2vbII1qD/QULtO2772DvXq3f2YsvgsEAI0Zox/fpp8OYMRAR0eD1CtFWNepd99FHH2Xq1Kls3boVj8fDs88+y9atW1m2bBk//fRTsNcohBBCCBFygUyp2BCW763IXQG0n9I9AKteez4DnWraW5ZUwPFOcI/H6fXx7o4KHF6V63vFEtdMk+Gm9ZzGuC7j+Nv3f+P5lc/z3sb3WLRzEU9PfporBlxx4mqI6mrYsOHQ4NOOHZCVpTUkPxa9Xssa6dZNm5jXrduBf3ft2uhyvIYodmg/k3CDQngIhxisylvFS6teAuC1aa+1njJKv67+wNQn2bZ6B6QCMqJN5Nd62GVzMTDBctzr1npUHF7tHSG2gX8EUBSFFKuB3VVuVhc7cPlULHqFxEaUZSb7H1eJw4vbp2I8rAF+uCmcOefPYXTaaO755h4+/eNTNhdt5tOLPqVvx76H3ll6Otxyi7bV1mr9zubPh4ULtaDtsmXa9thj2mti6NADwdhTT4WoqAavX4i2olFBqVNPPZX169fz+OOP079/f5YsWcKQIUNYvnw5/fv3D/YahRBCCCFCSlVVKlz+nlIhzOJpb03OAcIMh57odY4whWglzSvJn4Gz39/svL4tL1YXO6j1aCfo60rsnJnWfBkUkeZInp36LLP6z+KGr29gU9EmrvriKj7c/CFzzp9DvDX+wJVVVStJWrwYvvkGfv312NPGjEZtMl6PHtrWvTtkZGhBp06dtEyREGoNpXsen4eb5t+EisoVA65gfNfxIVvL8XSJMnFJZjTrSx2M6BhW7+csI8rELwW17Kly4/Wp6I8z4bDUGRgqoTsiEFQfqeFGdle52VquZeF1ijA2qsVMhFFHuEGhxqNSZPeQepTyREVRuG3EbQxNGcrMeTPZXrqdEW+M4I1pb3Bp/0uPfsdW64HeUqqqla0G+qb99JOWQbhypbY99ZTWj2rw4AOZVGPHahP/hGgnGv3Om5GRweuvvx7MtQghhBBCtEoOr4rT/5f7UJXv2Zw2NhdtBmB0p/YUlDo0M6W9NTkPSAjTY1DA6dNKxeqT8eTw+FhZZK/7flOZk9NTwjE04kS9IUamjWTNjWv497J/8/BPD7MoaxFDXhvC5xPeYMjm0gOBqMLCQ2+Ylga9ex8IPgW29PSQB56OpzVM3ntp1UusK1xHjCWGf0/6d8jWUR9pEcZDJuzVR2KYvi7As6/GTZfIYwefyxpZuhcQ6CsVyL5s7DRPRVFIshrYZXNTUHv0oFTAqLRRrL1xLZd+einf7f6OWZ/NYlvJNh4a99DxA2KKciA78JprtMtycrTg1I8/al937YI1a7Tt6ae12wwceCBIddppEB9/7H0I0co16p134cKF6PV6Jk+efMjl33zzDT6fj6lTpwZlcUIIIYQQrUGgdC/coDTqL/fBsCpvFSoqXWK6kBSRFJI1NIfA9D3QmhpHhLB8qjnpFa3Zeb6/r1R9glKrix04vSoJFj0ur4rN7WNbhZN+cccvfwoGo97I7FPvZ0Z1Ot88ewfDN+cw6N5JB870Qcv4GD8eJk+GKVO0bKg2KNST9/Jsefzt+78B8PiZj9Mx/MiBUm2doih0izKxqczJrkrXcYNSpf5yyvhG/gEgJfzQU9ymBLoDQanDm50fTYfwDnxz+Tf89fu/8sRvT/CPn/9BSW0Jz019Dr2uAY8lPR2uuELbAPLyDs2k2r4d1q/Xtmef1a7Tr9+BvmunnQZHGUomRGvVqN/6999/P16v94jLVVXl/vvvb/KihBBCCCFak3J/6V5D+5sEU6B0rz31kwJtMp3JH+
jr3E6zpAIO9JU6RpnbQRweH6v8WVKnJlnr+vCsLzlOf6ZgKSiAJ5+Evn3pftbl3P5NOSPzQKfCxo6wcHpfHIvnQ1kZfP013H57mw1Iqaoa8vK9e765hypXFaPSRnHD0BtCsoaWkOGfwpdtO/7xHyjfi7c07udh1uvo4A/6humVun83RrJVe0+qT1AKQK/T8/iEx3lh6gsoKLy0+iVmfTYLp+cEDf2PJzUVZs2CV1+FbdsgPx8+/FDrT9Wnj3adzZvhhRdg5kxITNQyFK+5Bt54A/74Qxs4IEQr1ahX+s6dO+kTeAEcpFevXmRlZTV5UUIIIYQQrUkgUyqkk/faYT+pgDCDgsulttsm5wGBxsmF9hOf4K4stuP0qXSw6OkZY6LGo/JrQS25NR6K7R46BDuA4nJpAaa33tLK8wJ/gA4Lg3POQZ0yhZfjdnHHxsfxqVsYsPsBPh3ek0xz2wxGBVS4fLh9oFcaXy7WFIuzFjNv6zz0ip5Xzn4FndI+MwUBukQaUdCCThVO7zHfTwOT95rS1D813Eixw0t6ZOP6SQUEAsmlDi8ur4pJX7/7um3EbSRYE7ji8yv4eMvHlNnL+Pziz4kwBaEnXHIyXHyxtgEUFcHPPx/IpNq0SZtwuXMnvP22dp24ODjlFG2y35gxMGyY9toWohVo1LtedHQ02dnZR1yelZVFeHh4ve/n559/Ztq0aaSkpKAoCl988cUJb/Piiy/Su3dvwsLC6NmzJ++++25Dli6EEEII0WB1QakQNTlXVbVu8l57DEqdkmilV4yJzOj22eQ8IHCCu7/Wi09Vj3k9u8fH6iItI+rUZCuKohBh1NEjRnt+1gUzW2r9erjrLkhJgRkztJH1Xq92Avv661rfqI8/Rrn2Wm6d/gjfXvEtHcM7snH/Roa9Noyvtn8VvLWEwH5/gLCDxYCuCcGLxrC77dy28DYA7hp5FwOTBrbo/luaxaAjLUJ7DeyyuY56HY9PpdKfmdrY8j2AkYlhZEaZOCWxaRMMI4w6Io06VA4cK/V1cb+LmT9rPuHGcL7N/pbx74ynpLakSes5qo4dtdfu889rwwfKyrTX8QMPaOV8YWHaZfPnw+zZWnlfdDSMHg1/+Ys2AdBmC/66hKinRgWlzjvvPO6++2527dpVd1lWVhZ/+tOfOPfcc+t9PzU1NQwcOJAXX3yxXtd/+eWXmT17Ng899BBbtmzh4Ycf5rbbbuPrr79u8GMQQgghhKivusl75tBkMews20mZvQyLwdIuT1wHJliY3jUqZP26Wkq8RY9RBy6fSpnzyFYYASuL7Lh8Kh3D9PQ4KFA3KF4r4dtS5sTlPXZQ64SKiuC557SJXoMHa/8uLdUCU/ffr5UI/fYbXH/9EaPoz+h6BmtvXMvotNFUOis578PzeOC7B/D4GnbC3lqEsp/Uo788SnZ5NqmRqTw07qEW338oHCjhO3pQKvC6MOsVrIbGvx/EmvXMyIgi0dr0jMJAMLm+JXwHm5Qxie+u/I64sDhW5a9i7FtjyanMafKajis2Fs46Cx55RGuWXlEBv/+uNUm/8EJIStImZa5YoZXpnn22dpvhw+G++7SMyYqK5l2jEAdp1Kv0ySefZMqUKfTq1Yu0tDQAcnNzGTt2LP/+d/2nRUydOrVBTdHnzJnDTTfdxMX+VMVu3bqxatUqnnjiCaZNm9awByGEEEIIUU+hzpRavk8r3RuaPBSTvn1nE7VnOkUhMcxAbo3W7DzhKD1zaj0+Vhcf6CV1cOlRl0gjMSYdFS4ff1Q4GRjfgIbndjt89RXMmXNoeZ7JBOedp/WfmTixXlPyUqNS+fHqH/nzkj/z3MrneOzXx/g973c+uPCDNtekO1ST97aVbOOJ354A4LmpzxFpjmzR/YdKRpSJH/Nr2Vvlxu1TjwhE103eM+ubVHYXTElWAzsrXY0KSoE2zfLXa35l0nuT2FayjTH/G8OSy5fQu0PvIK/0GEwmGDFC2+65B1QVdu+GX389MOUvOx
tWr9a2//xHm/A3aBCcfjrKmDGElZdLXyrRbBr17hsdHc2yZctYunQpGzZsICwsjAEDBnDaaacFe32HcDqdWCyH/vINCwtj5cqVuN1ujMb23YdACCGEEC3P49OmnkHoGp23535SJ5skqxaUKqj10C/uyP//fb8dtw8Sw/R0P6ycUVEUBidY+CG/lvUljhMHpXw+7aRzzhz45BOoqqr7L8/QYVRfejn2mRejBsbJO1VwHtmEOs6ix6I/NEvQpDfx7NRnGd1pNNd/dT3f7/6eoa8N5fsrv6d7fPf6PRmtQCianKuqyi0LbsHtc3N297M5v9f5LbbvUEuw6Iky6rC5feRUuck47Bg/0OQ8dP37DpcUFhhQ0PhswN4derPs2mV1gamxb41l4WULGZE6IljLrD9FgW7dtO3KK7XLcnMPBKh++knrR7VuHaxbh+G//2USoN5xB3TvrjVRD3wNbB06aPcrRCM0+t1XURQmTZrEpEmTgrme45o8eTJvvPEG06dPZ8iQIaxZs4Y33ngDt9tNSUkJycnJR9zG6XTidB6YdmDz18u63W7c7hNPPmnNAutv649DhIYcP6KtkmNXNFZjj51AOYlRB0bVg9vd8h+8A5lSw5OHy7EfIsF67+lg1o6fgpojP4vWenys8WdJje5gxuM58iS4V5Senwu0E+Rcm4PEo5WdbdmCbu5cdB9+iJKbW3ex2rkzvksvZdvZM/k6PF27sBQorTzumqONOq7uHoHhKOWVF/a8kF5X9+LiTy9mR9kOJrw7gR+u/IFOUZ2Oe5+tgcOrYgtM1jSqzfraOvj4mbt5Lj/u+ZEwQxhPT3z6qD/n9qxLhIGN5S52VjhItx56TBX7J1PGGJVW816XYNJKZcucXqodLsz1bHZ+uCRrEt9f/j3nfXweq/JXMf6d8Xx84cdM7DYxmMttnMREuOgibQPIz0f55Ze6jZ070Tmd2pS/zZuPuLkaHY3avTv064dv9GjU0aOhZ08JVJ3k6vsaVlT1OF0WD/Lcc89x4403YrFYeO6554573TvvvLNeOz9kIYrC559/zvTp0495Hbvdzm233cacOXNQVZXExEQuv/xynnzySQoLC0lMTDziNg899BAPP/zwEZe///77WK1Na3wnhBBCiPav2hLLvo59MLtq6Fa4vsX3b/fauWzTZfjw8b++/yPOeJT0GtFmOA1hZKcMQfF56Zm7goNP2fbHdKEsKhWLs4ou+zdyrNO5vPge2MI7EFNdSHKZ1uPVWlhIyrJlpP7yCzG7d9dd1221kjdmDPvGjaOsd29cxjB2Jw/Gp9Nj8DhROP6pgEdnRNXpSSzbRVx14TGvV+Gu4K9ZfyXPmUeKOYVHMx8lxhhTz2clNGrNUexN7I/B46B7/poW2We1p5rbtt1GpaeSy5MvZ0bijBbZb2tSFRZHbofeGN12MgrWHnKc704aiMMUQVrxH0Tay0K2xsPtTBmKx2Ahff8mwp1Nawpu99p5fM/jbKjagEExMCV+CkOjhtI3oi8mXessz1a8XsKKi4nIzyciP5/w/Hwi8vKIyM8nrKQE5SghBWdkJGW9elHWuzdlvXpRkZmJz9Q6H59oHrW1tcyaNYvKykqiDutPeLB6B6W6du3K6tWriY+Pp2vXrse+Q0U56mS+E6lPUCrA7Xazf/9+kpOTee211/jLX/5CRUUFOt2RzUePlinVqVMnSkpKjvvEtAVut5ulS5cyceJEKV0UDSbHj2ir5NgVjdXYY2ddqZPvCxxkRho4r3P9pwwHyw97fmDy+5NJj0on6/asFt+/0ATrvUdVVZ7/w4bbB1dlRpDgL1Oqcft4Y0cVHhUu6Gyla+Sx95Fb4+Gj3TV02LeLyzcsxfTZpyjr1x/Yh9GIOmUKvssuQz3rLPC3v1BVlXl7athX4yXVqueiruEnnDi3vtTJdwUOIgwK1/aIPG4z+n22fYyfM569lXvp37E/3172LbFhsQ14dlrW2lInPxQ46BZp4Pxmfm0Hjp+v1a
95c8Ob9IrvxerrV5+UPeJcXpWXttnwqnBN9wji/GXRqqry3FYbnsMubw2+yqlhp83D6UkWhiWYm3x/To+Ta76+hk/++KTusjBDGKd3Pp1J3SYxOWMymbGZraKv1gnf++x22LULZccOlDVrUJYvR1m9GsVx6JRQ1WRCHTIE9ZRTUEeMQO3VCzIywNz051O0TjabjYSEhBMGpepdvrf7oL+4HPzvUDAajXUN1j/88EPOOeecowakAMxmM+ajHOhGo7HdnMy0p8ciWp4cP6KtkmNXNFZDjx2bR/vjVqzFEJJjblXBKgBGdxotx3wrEIz3niSrgX3VHopdkOwPPq3eX41HhRSrge6xYcc+Gd22jS4ff8wN739M/PYtBy7X62HcOJgxA2XmTJT4+CPGbK8ptrOvxotRB+d0icJcj8b9gzsaWFXiwub2saXSy/COYce8brf4bnx75beMfWssm4o2ce7H57L0iqWttol3qVM7aU4Kb5nfJ9tqtvHmzjcBeHXaq4RbWj7I3RoYjdApwsieKjd7a1USI7Tn3uby4lG18fAJ4Wb0rSAgE5ASbmKnzUORwxeUY8VoNPLRzI/4avtXzN8xn8VZi8mrymPxrsUs3rUYlkK32G5MyZjClMwpnNH1DCJMEUF4JE1b81Efu9F4YJKnfyAZLhesXatN8fRvSlERyooV2tS/AEWBzp0P7U8V2NLTtfc10WbV97XS4J5SbrebXr16MX/+fHr3btrEgOrqarKyDvzFb/fu3axfv564uDjS09OZPXs2eXl5vPvuuwDs2LGDlStXMnLkSMrLy3n66afZvHkz77zzTpPWIYQQQghxLBWu0DY5X5GnfYAflTYqJPsXwZcUpgWlCms9DIiHKreXdSVagGRssvXIgNT27fDBB1qz8i1bUIB4wKfXkz/qNFKvuhRl+nSt2fAxlDu9/JhfA8C4lPB6H88GncIpSVYW76tmxf5aBiVYjpstlRmXydIrlnL626fze97vnPfheSy8bCEWQwMmBbaQlpy85/F5eGXfKwBcPehqTuvcvAOiWruMKBN7qtxk21yM8Ac6S/2T92LN+lYVkAItkAxQaA9e/y+domN6r+lM7zUdVVXZUryFxVmLWZS1iF/2/kJ2eTYvrX6Jl1a/hElv4tpB1/LCWS+g17WBQI3JBKNGaduf/qRN/MvOPhCkWrsWduwAmw327NG2JUuOvI/MTG0K4JAhMHSoFviKjg7BAxLNqcHvwEajEcdhqXiNtXr1as4444y67++9914ArrrqKt5++20KCgrIycmp+3+v18t//vMftm/fjtFo5IwzzmDZsmV06dIlKOsRQgghhDhchb/ReUwIglKqqrIiVwtKyeS99iPZagQcdSPmV+y341UhLdxAl0DZntsNX34JL78M339/4MZGI0yciPv8C3g1cyzV0XFc0SOa1PBj/0Xap6os2FuF2wfpEUaGJDQsQNQ/zszy/bVUunysK3HUBRGOpV/Hfiy+bDFnvnsmP+z5gZnzZvLZRZ9h1LeeTD+fqlLs0J7/xBYISj2/6nn2OPYQFxbHkxOebPb9tXYZUSa+y6shp9qNy6ti0it1QyXiWtHkvYBAUKrc6cPh8WExHL1Kp7EURaFfx37069iP+065j2pXNT/s/oFFWYtYlLWIPRV7eGXNKxj1Rp6d8myrKOtrEEXRSvUyMg5M/FNVKC7WglOHbzt3atlWW7dq2/vvH7ivzEwtQHVwoCpOei22ZY16B77tttt44okneOONNzAYGv8mPm7cOI7X0urtt98+5PvevXuzbt26Ru9PCCGEEKIhVFU9EJSqR6lTsGWVZVFSW4JZb2Zw8uAW379oHoET3P12DxVOL+v9WVKnJltR8vLg9de1raBAu4FOB1OmaKUx06ZBbCxGoOveKjaVOVlX4jhuUGp1sYPcGg8mncJZ6RENPqHV+7OlFuX4s6XiLZhOMIFseOpwvr70a6bMncL8HfO58osree/891pNlkeZw4tXBZ
NOIcYU3ADDwVRV5cnfnuSB7x8A4NEzHqVD+LEz2k4WsWYdMSYdFS4fe6pc9Igx12VKxbeiXlIBYQYdcWY9ZU4v+2rcdI9u3j5IEaYIpvWcxrSe01BVlQ82f8Bln13G8yufJz06nftOua9Z998iFAU6dtS2U0899P+8Xti3TwtIrVsHa9Zo2VV790JWlrZ99NGB63ftCgMGQJ8+0Lev9rVnT5DBZm1CoyJKq1at4rvvvmPJkiX079+f8PBD66E/++yzoCxOCCGEECKUajwqHhUUILoZT1yPJZAlNTRl6EnZELm9ijXrMOsVnF6V+Xur8Hp9DN/0G53/8S58/bV2QgbaydoNN8CNN2r9VQ4zKMHCpjIn28qdnJkaTthRsjdKHR5+8pftjU8Nb3TGX784M8sLa6lw+VhXYmdk4olP9k7vcjqfXfQZ5314Hh9u/pAIYwSvTXutVWR5BEr3OoTpm209lY5KrvriKr7c/iUAZ8adydUDr26WfbU1iqKQEW1iTbGDbJv7kKBUa8yUAi3LsMzpJaeq+YNSB1MUhVn9Z1FYXciflvyJPy/9M6mRqVza/9IWW0OL0+uhSxdtO+usA5eXlGjBqbVrDwSqsrNh925t+/LLA9dVFC1Y1afPoVuXLhAZqTVYbwXvRaKRQamYmBguvPDCYK9FCCGEEKJVCWRJRZp06I/TR6e5LM9dDsCoVOkn1Z4oikJSmIH9eUUkv/chZ817i7h9Bw0SOv10uOUWOP98ra/KMaRYDXQM01Nk97K5zHlEE3KfqjJ/bzVeFbpGGhkY3/gTab2iMCbJyoKcalYU2RmcEHbCbCmAqd2nMveCuVzy6SW8se4NIs2R/GfSf0IemCry9wZqrn5SG/dv5MKPLySrLAuT3sR/J/2X5PxkdErLB7dbq4woLSi1y+ZCVdW68r341hqUijSyvtTB3mp3SPZ/7+h72Ve5j//+/l+u+uIqkiKSOKPrGSe+YXuSkACTJmlbQHm5lk21ZcuBcr8tW6C0VAtYZWfD/PlH3pfBABERWoAq8LUx/w4PB49Hm0LocBz59eB/GwzHvy+rVcuMPck06F3Y5/Px1FNPsWPHDlwuF+PHj+ehhx4iLOz4deVCCCGEEG1ReQhL9+BAUGp0J+kn1W54vfDtt4x/6XXiF8/H4NKmOxIZCVddBTffrJWf1IOiKAxOsPDNvhrWlzgY1sFySLDn9/12Cmo9mPUKUxtRtne4vnFmlu2vpdzpY02xndFJ9SuNmdl3JtWuaq796lqeWfEM0eZo/j7u701aS1MdCEoF/7U9Z8Mcbpp/E3aPnfTodD6Z+QmDOg5iYcHCoO+rLesUYcSgQJXbR16Nhyq3NlSiNZbvgZYpBVqWnd3jO2pmYnP7z+T/kFeVx7yt85j+0XR+veZX+if2b/F1tCqxsTB+vLYdrLj4QIDq4GBVUZH2/x4PVFRoW2uhKFqQKzJSC6QNGRLqFbWIBgWlHnnkER566CEmTJhAWFgYzz33HMXFxfzvf/9rrvUJIYQQQoRMhSswDarlTz6qXdVs3L8RkCbn7cLOnfD22/Duu5CbS6L/4v09+2G89Vbirr1C+4t5A/WJNfNDXi2lTi/7qj2k+xulF9s9/FpYC8CE1HCighBY1fmzpebvreb3IjtDOlgw6+v32rhm8DVUuaq4a/FdPPTTQ2zYv4EHT3+QQUmDmryuxmiOyXtOj5O7F9/NK2u0KXuTMyYz94K5xFvjcbtDk13Tmhl1Cp0jjeyyuVlVbAcg3KAEvYl4sEQYdcRb9JQ6vORUu+kZ03IlfAE6Rce757/L/pr9/Lz3Z6bOncry65bTKbpTi6+l1evQQcs6Pf30Qy/3eqG6WtuqqrStKf+uqdEGUFgsEBZ27K9msxYIC9z28Pvz+bTm74G1NaF3d1vToEf67rvv8tJLL3HTTTcB8O2333L22WfzxhtvoDsJ08yEEEII0b5VOLW/3IciU2p1/mp8qo+0qDRSo1JbfP8iCKqqYN48eO
st+PXXA5fHxuK55FK+OmMG5uFDObtLVKN3Ydbr6BNrZn2pg3UldtIjjXhVf68qFTKjTPSLC97Jc59YM8sK7ZQ5vawudjCmntlSAHeOvBO7287s72bz+bbP+Xzb55zX8zz+32n/j6EpQ4O2xhOpcfuo9miv7Q6W4Jz45VTmMHPeTFbmrQTgwdMe5MHTH2w1jd1bq4woE7tsbnZUuIDW208qoHOEMaRBKQCLwcIXF3/BqW+dytbirUydO5Vfr/2VGEtMSNbT5uj1EB2tbcGgqk3vTaWqWonfwYGq7t2Ds742oEGRpJycHM46qNHYhAkTUBSF/Pz8oC9MCCGEECLUAplSjW0O3RTL9/lL9yRLqm1RVfj5Z7j6akhKguuu0wJSOh1MnQoffwz5+RheepELZp7RpIBUwKAECwDbK13UuH0sL7Sz3+7FoleYEoSyvYPpFIVT/YGolUV2HF5fg27/l1P/wqZbNnFpv0tRUPhy+5cMe30Y0z6Yxqq8VUFb5/EESvdizbp69cU6kSW7ljDk1SGszFtJrCWWBbMW8PAZD0tAqh66RWk90wLz2OPNrTs7JFDCl1MV2sy32LBYFl22iOSIZLYUb+H8j87H6XGGdE0nrWC8vyqK1k+qY0fIyIBBg7TsqpNEg4JSHo8Hi8VyyGVGo1HSUYUQQgjRLgUanceGIiiVK0GpNmf7dpg4USsXeecdqK2FHj3gsccgJwcWLoSZM7VyjiBKshpIthrwqfB9Xg3L/GV7k9IiiDAGv5qhV6yJBIsep1dldZGjwbfv27Ev71/4Pltv28rlAy5Hp+iYv2M+I94YwVlzz6qbOtlcgtXk3OPz8K+f/8WU96ZQai9lSPIQ1ty4hrO6n3XiGwtAC/gf3Ni8tWdKBYJSxQ4vte6GBWSDvpbodBZdtohIUyQ/7vmRq7+8Gp8a2jUJ0RgNeidWVZWrr74as/lAqqLD4eDmm28mPDy87rLPPvsseCsUQgghhAgBl1elxqP9/T7G1LJtClRVrTsxlybnbUBtLTz6KDz5JLjdWu+QK66Aa66B0aNbZOz4oAQLBTnVbCnXsiV6xpjoHXvsyX1NEegt9eWeKlYV2xnWwdKoPkC9Enox5/w5PHjagzzyyyO8t/E9FmUtYlHWIiZ2m8jfT/87Y9LHBH39wegnta5gHTd8fQNrCtYAcP3g63n+rOexGIIbcDwZZESZKHVoPaVaa5PzAKtRRweLnmJ/CV+v2NCU8AUMTBrIZxd/xtS5U/lw84ekRabx1KSnQromIRqqQb89rrrqKjp27Eh0dHTddvnll5OSknLIZUIIIYQQbV2gdM+ib/nGu9nl2RTXFmPSmxicNLhF9y0aaP58bVreI49oAampU7UJT6+/Dqec0iIBKYDeMWbM/lI0q0Fhclpwy/YO1yvGRAd/ttRKf5Pqxuoe3523p7/N9tu3c93g6zDoDCzNXsqpb53KRfMuwua0BWnVmqZM3qt11/J/S/+P4a8PZ03BGqLN0bx13lu8fu7rEpBqpIwoY92/41t5phRQN0wgp7p1VAtN6DaBt857C4B/L/83z654NsQrEqJhGvTngbfeequ51iGEEEII0aoESvdC0eQ8ULo3JHkIZkNo/xIvjmHvXrjrLvjyS+37tDR47jmYPr3FAlEHM+kVRnQMY1lhLVPTI7A2Q9newRRF4dRkK5/vrmJ1kYPhHcIIa2LwNiMugzfOfYO/jv0rj//6OP9b/z/mbZ3Hhv0b+Oyiz+jbsW+T1+3xqZQ6GpcptXTXUm6afxO7K3YDMLPPTJ6d8izJkclNXtfJLC3cSMcwPXpFIbqFs1IbIz3CyJpiB3tbSVAK4PIBl5Nry2X2d7O5+5u7+TnnZ56c8CQZcRmhXpoQJ9T6X/VCCCGEECFQHghKmVv+45I0OW/FXC54/HHo3VsLSBkM8H//B3/8AeefH5KAVMCYJCv3Doyne3TLBDJ7RJvoGKbH5VNZWdS0bKmDdY3tyqvTXuXXa34lLSqNHa
U7GPHGCD7Y9EGT77vU4cWHlgEZVc/AXUltCVd+fiWT3pvE7ordpEWl8dUlX/HxzI8lIBUEep3CNT1juKpnTLNm9wVLoK9UqcNLTYj7Sh3sL2P+wl/H/hWdouOzPz6jz0t9+POSP1PhqAj10oQ4LglKCSGEEEIcRaVLO9kIRZPzFXn+flISlGpVEjZuxDB0KMyerY3vPu00WL8enngCIiJCvTwA9C14Uq8cNIlvTbGDWk9wT9BHpo1k7Y1rmdBtArXuWmZ9Nos7Ft6By+tq9H0e3OT8RAEQVVWZs2EOvV7oxZyNc1BQuHPEnWy9dSvTek5r9BrEkdpCMCogzKCrK/1sLSV8oD2H/xr/LzbcvIFJGZNweV38e/m/yXwukxdXvojH5wn1EoU4KglKCSGEEEIcRXmIyvdqXDVsKNwAwKi0US26b3EMeXnor7ySMQ8+iLJ9uza2+9134ccftX5SJ7Hu0SYSA9lS+4OXLRXQIbwDiy9bzF/H/hWAF1a9wLi3x5Fry23U/dW3n1R2eTaT35vMlV9cSam9lP4d+7P8uuU8O/VZIs2Rjdq3aD8C2VJ7q1pPUCqgX8d+LL5sMQtnLaR3Qm9K7aXcvuh2Brw8gIU7F6KqaqiXKMQhJCglhBBCCHEUgUbnLV2+tzp/NV7VS2pkKp2iO7XovsVhXC5tol7Pnug+/BBVUfDecgts365N12tD2R3NRVEUxiZrU7jXlNipcnuDvg+9Ts+/xv+Lry/9mhhLDMtzlzPk1SF8v/v7Bt/X/hNM3ttWso07Ft5Bv5f6sTR7KWa9mUfHP8qaG9cwMm1kkx6HaD86t7Jm54dTFIWp3aey8ZaNvHjWiyRYE/ij5A/Ofv9spsydwuaizaFeohB1JCglhBBCCHEYn6rWle/FtHD53opcf+leJyndC6lvvoH+/eEvf4GaGnyjRvHTv/+N79lnISYm1KtrVTKijKRYDbh9sDinutkyMc7pcQ5rblzDoKRBFNcWM3HORB7/9XF8av3KBlVVPaR8L8Dr8/LV9q+YNGcSvV/szQurXsDusXNGlzPYdMsmZo+djVFvPNbdipNQp3AjClDm9DZLIDZYDDoDtw6/lZ137OTPp/wZk97Ekl1LGPjKQG6ef3PQJ1sK0RgSlBJCCCGEOEyV24dPBZ0Ckc08xexwgcl7o1KldC8kdu/WGpZPmQI7dkBiIrzzDt4ff6QyQyZZHY2iKExNj0CvwC6bm01lzmbbV7fYbiy7dhnXDLoGn+pj9nezOf+j8+vVzLnK7cPhVVGABIue0tpSnvrtKTKfz+S8D89jafZSFBTO7XkuSy5fwndXfkf3+O7N9lhE22Ux6Ej0BzZzWmEJ3+FiLDE8OfFJtt66lRl9ZuBTfby65lXO+/A8nJ7me70KUR8Nm4MqhBBCCHESCPSTijbp0LVgiZaqqnVBKcmUamF2u1aq9/jj4HCAXg933gl//ztER4O79Z94hlKHMANjk638mF/Ld7k1dIk0EtVM/djCjGG8ee6bjE4bze2Lbuer7V+R/J9kMmIz6B7fnczYTDLjDmxpUWnodXqK/KV7Nfat3Dz/XeZumovD4wAgLiyO6wdfzy3Db6FLTJdmWbdoX9IjjRTaPeRUu+kbZwn1cuolIy6DeTPn8eOeHzn3g3P5cc+PXP3l1cy9YC46RfJVRGhIUEoIIYQQ4jAVTv/kvRZucr67YjdFNUUYdUaGJA9p0X2ftFQVvvwS7rkH9uzRLhs/Hp577qRvYt5QIzqGsaPCRX6th0U51VyUEdVsU9UUReGGoTcwOHkwF39yMdnl2Wwp3sKW4i1HXNekN9Etthvx4d3ItRWzt3xV3f8NShrEHSPu4NJ+lxJmDGuWtYr2KT3CyMoie6tsdn4i47qM47OLP2Pq3Kl8uPlDOkV14smJT4Z6WeIkJUEpIYQQQojDHGhyHpp+UkOSh2AxtI2/vLdp69fD//0fLF2qfZ+WBk8/DTNmSBPzRtApCmd3juCtbRXsrnKzod
TJoITmPY6HpQxjx+072FOxh6yyrANbufY1uzwbl9fFtpJtULINAL3OwMw+M7h9+O2c0umUZgucifatU4QBBahw+bC5vM2WGdhcJnSbwP/O/R9XfnElTy17ik5Rnbhj5B2hXpY4CUlQSgghhBDiMBXO0ASlAtPETul0Sovu96Szdy/87W8wd66WKWUywX33wQMPQHh4qFfXpsVbDJyWEs73eTV8n6eV8TX360iv05MRl0FGXAaTmXzI/3l9XvbZ9rGpaAdvb9mIy+fkwTGXMzypc7OuSbR/Zr2OJKuBgloPe6vc9I9vW0EpgCsGXkGuLZcHvn+AuxbfRUpkChf2uTDUyxInGSkcFUIIIYQ4TKB8L8bUch+VVFVlcdZiAKZkTmmx/Z5UysrgT3+CHj3gvfe0gNTFF8PWrfDIIxKQCpLhHSykhRtw+VQWNuM0vvrQ6/R0ielCmPUUhne5mpn9bmNYYnrI1iPal84R2lTGnOq2V8IXcP+p93PLsFtQUbnss8v4NefXUC9JnGQkKCWEEEIIcZjyEJTvbS7aTF5VHmGGME7rfFqL7fekEGhinpGhlee5XFrfqFWr4MMPtctF0CiKwtmdIzHqtJP1NSWOkK6nyu1lnX8NY5OtUq4ngiY9su0HpRRF4fmpz3Nez/Nwep2c+8G5WrmrEC1EglJCCCGEEAdxeHw4vVpmR0wL9ghZlLUIgDO6niH9pILF64W339Yyo/7yF6iogAEDYNEi+PZbGDYs1Ctst2LNes5I0TLPfsyroczhDdlaVuy341UhLdxAF38QQYhgSAs3ogMqXb66su+2SK/T8/6F7zMqbRTljnKmvDeFgqqCUC9LnCQkKCWEEEIIcZBAllS4QcGkb7mMirrSvQwp3WsyVYUFC2DQILjmGsjNhU6d4J13YO1amDJFGpm3gMEJFjpHGPGosDCnCl8IyvhsLi/r/VlSp0qWlAgyk14hOVxr0xzqbCmby8t3udV8vaeqUWuxGq18fenXdI/rzt7KvZz9/tlUOauaYaVCHEqCUkIIIYQQB6kM9JNqwdK9KmdVXR+Pqd2ntth+2x2nE956CwYOhHPOgc2bITYWnnoKduyAK68EfdtrRtxWKYrCWZ0jMOkUcms8rCqyt/galvuzpDpFGOr6/wgRTOn+42pvVWiCUpUuL9/sq+bVreWsKnawpdzJ+zsrmbuzgr1Vrgb1dEuwJrD48sV0DO/IusJ1zJg3A7e37ZYmirZBglJCCCGEEAep9GdKRRlb7mPS97u/x+1zkxmXSWZcZovtt90oLoZ//hM6d4Zrr4VNm7Sm5X/+M+zapU3Ws0hJZChEm/ScmaqV8f1cUEuJw9Ni+650edlQ6u8llRQuWVKiWQSCnfuq3S3a1L/C6WVRThWvbi1nXYmjLvg6MN6MToF91R4+yLIxd2cle2z1D051i+3GglkLCDeGs2TXEm5aeFNIhxWI9s8Q6gUIIYQQQrQmNreWKRUVgn5SUrrXQFu3wn//C3PmgMPfTDstDe68E264AWJiQrk64Tcg3sz2CifZVW4W7K3mih7R6FogQLS80I5P1YIG6dJLSjST1AgjOkX73VHh8hHbzFm25U4vywtr2VzmxOe/rHOEkTFJ1rrjfEySlRX77WwodZBb4+HDXTZSww2MSbLSNdJ4wgDtsJRhfDzzY8794Fze2/QeJfElnOk5E6NRXkci+CRTSgghhBDiIDaX9jE/2tQyH5NUVa3rJyWle/WgqrB0KZx1FvTtC6+/rgWkhg2D99+H7GwtQ0oCUq2GoihMSY/ArFcoqPWwYn/zl/FVOL1sLD3QS0qI5mLUKaRY/X2lmrGEr8zhZf7eKl7bWs5Gf0CqS6SRy7pHc2n36EMCr1EmPZM6RXBzn1iGdrBgUCCvxsPHu2y8u6OSrMoTZ06d1f0sXj3nVQAWly7mlLdOYdP+Tc32+MTJSzKlhBBCCCEOUle+10KZUttKtrG3ci9mvZlxXca1yD7bpOpqLej0wgtaeR
5ozcqnT4d774UxY6R5eSsWZdIzITWcBTnV/FpYS7coE0nW5jsVWba/tu6kvZP0khLNLD3SSG6Nh73VbgYmBLdU2O1TWbqvmk1lTgJhpG6RRsYkW0kNP/6xHWnSMzEtgtGJVn7fX8u6EgcFtR4+ybbRwaIn6gR/fImKnsFfxkXw8m+3sLl4M8NeH8bjZz7OXaPuQqe03/yWPVUutpY5OT0lnPAglPJ7VZVf8muJtegZGC+l5IeToJQQQgghxEECmVIn+rAeLIHSvdO7nI7VKBkdR9i6FV5+Gd59F2w27bLwcK131F13QUZGaNcn6q1fnJntlS6yKl18mFXJpZnRJDZDYKrc6WVTqROAsZIlJVpA5wgjy7CTU6X1lQpW/zK3T+XTbBt7/BlYGVFamV7KCYJRh4sw6jgzLYJRiVZWFtlZW2Kn2OGl2OE94W1jos7k7tN/ZNXu2SzatZB7l9zLwqyFvH3e26RGpTbqcbVmVS4vn++uwulVqfH4mNEtqsk/z+WFdlYU2dEBGVEmIlqwZ2VbIEEpIYQQQgg/l1fF4dX+Ft1SQalA6Z70kzqIywWffw4vvQQ//3zg8sxMuPlmLSAVGxu69YlGURSFc9Ij+GiXjYJaDx9kVXJJZnTQM6aWFdaiomWTnCiTRIhgSA03oleg2uOjzOkl3tL0Y9rtU/lkl4291W6MOriwaxRdokxNus9wo44zUsMZ2TGM3VUuvPXoX76ssBZI4YEzPuS8Xu9zzzf38G32twx4ZQCvnfMaF/a5sElrak1UVWXxvmqc/idml83NxjJnk7Kb9td6/M8h+ICNpQ5OSZJg+cEkRCeEEEII4Wfzl+6Z9QoWffN/TKpx1fDT3p8A6ScFQE4O/PWvkJ4Ol1yiBaR0Ojj/fFiyBLZvhz/9SQJSbZjFoOPizChSrAYcXpUPsiopqA1eH54yh5fNZVqWlPSSEi3FoFPqAqA51U0/nl1elXn+gJRJp3BRRnSTA1IHsxp19I2zMCD+xNvoDmYAVpe6uHrwDay7aR1Dk4dSZi9jxrwZXPPlNVQ5q4K2tlDaWOZkl82NXoFB/kDUd7k1dWX9DeX1qczfW4UPiPRnR60vdeCTaYaHkKCUEEIIIYRfZaB0r4VS63/c8yMur4suMV3oGd+zRfbZ6vh8sHgxnHsudO0Kjz4K+/dDcjI8+CDs3QuffQYTJ2oBKtHmWfRaYCo13IDTq/Jhlo38muAEpn7zZ0llRBkbXOIkRFOk+3uXNbXZucurMi+7kpy6gFRUSPui9YoxYnLX4vCqrCl20DOhJ8uuW8bsU2ejoPD2+rcZ9Ooglu1bFrI1BkOly8t3uTUAnJZsZVKncFLDDbh8Kotyqk/YGP5ofiuspdjhJcygcHmPaCx6BZvLx25b8zXEb4vkN7sQJ6kdlW4qwjuGehlCCNGq2NyBJuct209qSsaUoPUgaTNKSuDJJ6F7d5g6Fb7+WgtQjR8P8+ZpwaiHH4a0tFCvVDQDs17HRRlRpPkDUx9l2chrYmCq1OFha3mgl1R4MJYpRL0Fpt/lVLsbFcAAcHp9fLyrkn3VHsw6hYszo0gLcaN+naKQULkPgJVFdhxeHya9iUfPfJSfrv6JztGdyS7PZuxbY7l5/s18tPkjdpfvbvRzEAqqqgWeXD6V1HADwzuGoVMUzk6PxKDAnio360ocDbrPgho3y/2TRienRRBt0tM/Tss6W1fasPtq7yQoJcRJyOn1sWBfLQXx3euyAoQQQhxoch7dQpP3Av2kTprSPVWFZcvgiiu0YNNf/gLZ2RAdrTUt/+MP+O47mDEDjJLl0t5pgaloOkUYcPq0wFRuE0qffiu0owLdo5t3sp8QR5NiNWBQoMajUlqPBuKH0wJSNnJrPJj1ij+bsHW8D0bVlhBn1tVlSwWM7TyWDTdv4PIBl+NTfby65lUu+fQSuj3XjcR/J3LO++fwj5/+weKsxZTWlobwERzfuhIHe6rcGBQ4Oz0Snf+PRHEWPaenaAHuH/
JrqHDW7+fq8anMz6lGBXrHmOgVqwWjBvknM+6qdNW1CxDS6FyIk1JBjYdAKKrQ7iEh3BzS9QghRGvRkpP3dpbuZFf5Low6I2d0OaPZ9xdS1dUwd642RW/DhgOXDx0Kt96q9Y+ySv+fk5FJrzCzWzSfZNvIqXbz8S4bMxtRrlRsP5Aldao0ERYhEOgrtbfazd5qNwlh9T/Vdnh9fJxlI7/Wg0WvNMsAgKZQgNEdzSzYZ2dlkZ2hCRYsBu33ZLQlmjnnz2FWv1ks2LmAlXkrWV+4nuLaYhbsXMCCnQvq7iczLpMRqSMYkTKCEakjGJQ0iDBjWIgelabC6eWHfK1s7/SUcOIsh/5RalgHCzsqneyr9rAgp4pZmdEnzGz+uaCWUoeXcIPCpE4RdZfHWwykRxjJqXazodQhGZ1+redIF0K0mNwaT92/C+1e+oVwLUII0ZoEmpm2RKZUoHRvbOexRJojm31/IbFtG7zwArz7LlT5G+FaLHDppXDLLTB8eGjXJ1oFk15hZkZU3aSxj3dVMjMjuq5HT3385p9u1SPaRGIrOpkXJ5fOkVpQKqfazdAO9Qu2ODy+uomUrTEgFdAzysjvFhclDi+riu1HBFSmdp9al/Xr8DjYULiBlXkrWZm/kpV5K9lRuoOssiyyyrJ4f9P7ABh0BgYmDtQCVf6tV0IvdErLFHSpqsqCnCrcPugUYWBYhyOn7Cn+Mr43t5Wzr9rD6mIHwzse+2ebW+1mZZFWtjclPYIww6GPZXCCxR+UcjImyVqXlXUya31HuxCi2R3cTHS/XVJHhRAioCUzpQKle1MypjT7vkLik09g1ixw+3/n9OgBN98MV10FcXGhXZtodYw6hRkZUXyWbWN3lZt5uyqZ0S2KzpEnnjhWZPewrcIFyMQ9EVp1zc79faVOlFHj8Pj4cJeNwloPYf6AVGsNqiqKwqlJVr7YU8XqIgfDOoQdEXAJsBgsjEwbyci0kXWXldvLWZ2/mt/zfuf3vN9ZmbeSopoi1hSsYU3BGl5e/TIAkaZIhqUMY0TqCEamjmRCtwnN9oeb1cUO9lV7MOq0sr1j/bxizHrOSAlnSW4NP+XXkBFlOiKjCsDt04JcAP3izHSPPrIapUe0CatBodrtI6vSRY8YqVhpnUe8EKLZqKpKXu2BTKn9dm+9fmkKIUR751NVqtwtE5Syu+38sOcHoJ32k3r1VS0TSlVhwgS4/36tgbn8rhHHYdQpXNhNC0xlV7mZt8vGgHgLuhMcNrnV2ueaXjEmOjagZEqIYEu2GjDqwO5RWbSvGtMJDt69Ve666WyXZka3+uO3Z4yJDhY9xQ4vq4rsnJZS//Kz2LBYJmZMZGLGREA7J9ln28fKvJX8nvs7K/NXsjp/NVWuKn7Y80Pd78gwQxjn9jyXWf1nMSVzCib9iQPV9VHm8PKTv2xvfGo4MebjZ0gPTrCwo9LFnio3C3KquKx79BFZTj/m11Du9BFp1DEh9ejPjV6nMCDOwooiO+tKHBKUQoJSQpx0Sh1enF4VgwJerxcXesqdvqNG+4UQ4mRS5fahAjoFIo7x199g+Xnvzzg8DlIjU+nboW+z7qtFqSo89hj89a/a9zfdBC++CHr5HSPqx6BTuKBbFJ/vtrHL5mZtAyZejZFeUiLE9DqF9Agju2xuNpY663Ubqz8g1aGVB6RAy5Yam2zls91VdWVsx8qWqs99pUenkx6dzow+MwDw+Dz8UfyHVvaXt5Lv93xPVlkWH235iI+2fESsJZaZfWYyq/8sxnYe2+gyP5+/bM+jQpdII4PijyzbO9p6p6ZH8L8/Ksir8bCqyM7IxAPvOXurXHVN4KemR9T13DqaQQlaUGp3lZsKp/eEAbH2LqRH/s8//8xTTz3FmjVrKCgo4PPPP2f69OnHvc3cuXN58skn2blzJ9HR0UydOpWnnnqK+Pj4llm0EG1cnr+fVFKYntJyG3ZzFIW1HglKCSFOenWle0Zds2ePBvpJTc2c2n4yVX0+uO
8+eOYZ7fu//Q3+8Q/JjhINZtApnN81io2ljrrsxRNJCTe0iZN60f5NTIsgqcyBTz3xdfWKQr84c5sKSnSPNpEYpme/3cvvRXbGNSBb6kQMOgP9E/vTP7E/1w25DlVVWVuwlrmb5vLh5g8pqC7gtbWv8dra10iLSuPSfpcyq/8sBiYObNDv0lVFdvJqPJh1WqCpvreNNukZnxbOopxqfi6oJSPKREKYAafXx8KcagAGxpvpFnX8bK4Ys56ukUZ2V7lZX+oI6nPYFoX0nbumpoaBAwdy7bXXcsEFF5zw+r/99htXXnklzzzzDNOmTSMvL4+bb76ZG264gc8++6wFVixE25fr7yeVYjVQs78auzmKglo3feIkdVQIcXILjGeOasEm5+2mdM/thuuv1xqagxaYuvvukC5JtG0GncKQejaKFqI1iTHr2/VUNUVRODXZyqfZVawptjOiQxhWY/NkFyuKwtCUoQxNGcpTE5/ip70/MXfjXD7941Nybbk8tewpnlr2FL0TenNh7wuZ2n0qI1JHYNAdO8xRYvfwc4E2GGF8WniDB5sMiDOzo8LJLpub+TnVXNkjmh/yaql0+Ygy6Rh/jLK9ww1KsLC7ys3GUgdjk6zoT1Sn3I61TFv7Y5g6dSr/+te/OP/88+t1/eXLl9OlSxfuvPNOunbtyqmnnspNN93EypUrm3mlQrQf+f5MqRSrHotLi+gX2j3Hu4kQQpwUKluoyXl2eTY7Sndg0Bk4s+uZzbqvFmG3w4UXagEpvV77KgEpIYRotzKjTCRZDbh98Lt/0lxz0+v0jO86njfPe5PC+wr59KJPubD3hZj1Zv4o+YN//fIvxvxvDB2f6sjFn1zMW+veIr8q/5D78Kkq83Oq8aqQEWVkQCP+KK8oClPSIzDrFQprPXy+u4r1pVrZ3tnpEZj19fsMkRltIsKoo9ajsqPS1eB1tCdtKsd19OjRPPDAAyxcuJCpU6dSVFTEJ598wllnnRXqpQnRJtg9PkqdWiZAslWPxaU199tf68WnqjKSVAhxUmupyXuBqXundDqFaEt0s+6r2VVWwrRp8MsvYLHAxx9r3wshhGi3FEVhbJKVedk2LVuqYxjhzZQtdTQWg4ULel/ABb0voNJRyRfbvmBR1iKW7FpCuaOcj7d8zMdbPgZgQOIApmZOZUrmFDAOorDWg0WvBZYaWz4fadQzMS2c+Xur2ekPKA1JsBx1WqiqqmSVZbE0eykltSWH/N/eKhc51R5W79Yx4LC+VjcMuYHkyORGra+taVNBqTFjxjB37lwuvvhiHA4HHo+HadOm8eKLLx7zNk6nE6fzQJM5m80GgNvtxh0YUdxGBdbf1h+HaDk5VdqxEmvSYVS9mN21GBRw+VSKqp3ES18p0QbIe59orBMdOxVOLWs0Qte8x9fCnQsBmNh1Yts+jgsLMZxzDsrGjahRUXi/+AL11FO1Ur52SN57RFPI8SPaqmMdu53CIDlMT4Hdy7KCasYlh6bc1qq3MqvvLGb1nYXH52F1/mq+yf6GJdlLWJ2/mo37N7Jx/0ae+O0JTPpwkqL6EGc2MG/V8QNSFoOFQYmDGJYyjOEpw0mPSj8kiNUjQkdmpIGsKg8xJh2ndjTVPUfVrmp+3PsjS3YtYUn2ErIrsk/4OD497Psp3aaQYElo8PPRmtT3/U5RVbUeLdian6IoJ2x0vnXrViZMmMA999zD5MmTKSgo4M9//jPDhw/nzTffPOptHnroIR5++OEjLn///fexWmVChzi5FEWnUxrdiejq/aSUZQGwp2N/7JYoUkp2EF1bHOIVCiFE6OxKGozLZKVT0WYiHJXNsg+3z80Vm6/A4XPwdI+n6Wbt1iz7aW7W/fsZ/fe/E1FYiCMmhuUPPoitW9t8LEIIIRqn2hLDvo59UXxeMvLXYPS1rqCrzWNjXdU6fq/9g42Vq6l2lZz4RscQbYimu7U73a3d6WHtQaY1E6sxhtKoVKKqi9hfvZ11VetYa1vL1pqteN
QD7VEMioHe4b1JNiejcGgwrNoSg9tgweKqIcxVVXf5jMQZdDB1aPR6W4Pa2lpmzZpFZWUlUVFRx7xemwpKXXHFFTgcDubNm1d32a+//srYsWPJz88nOfnI9LajZUp16tSJkpKS4z4xbYHb7Wbp0qVMnDgRo9EY6uWINuDj3dXsq/EyMSWM3pEKS5cuxTzwdNaXexgSb+KMEP2FQ4iGkPc+0VjHO3ZUVeX5P2y4fXBN9wjimmkS0ve7v2fKB1NICk9i75172+bkvR07MEyciFJQgNqlC56FCyEzM9Sranby3iOaQo4f0Vad6Hfnh7tryK/1MjjexPhWeC6xsczF0nw7PtVHFNtINO6HevzqrXRUsqZgDSvzV7KxaCMe35E9eDNjM+ndoTfrCtaRW5V7yP91ie7C5IzJTOo2iXGdxxFpjjzqfrKr3Hy+txaLXuHGnpEY21HDc5vNRkJCwgmDUm2qfK+2thaD4dAl6/Xah8ZjxdbMZjNm85ENzIxGY7v5hdCeHotoPj5VpdCu9ZPqFGXGaNBeM8nhRtaXeyhy+OQ4Em2KvPeJxjrasWP3+AhMno+zmpvtQ+HSPUsBmNJ9CibT8UdGt0r792s9owoKoF8/lG++wZiSEupVtSh57xFNIcePaKuOdeyelgwf7rKxsczFKUnhRLbABNv6WltsZ2m+1oh9REcrZ6aObdAfg67negAcHgfrC9ezMm8lv+f9zsq8lWSVZZFVrm2glfuN6zKOKRlTmJI5hR7xPeq1r+6xBqIKHNhcPrJrfPSLs5zwNm1Ffd/rQhqUqq6uJisrq+773bt3s379euLi4khPT2f27Nnk5eXxrn+88LRp07jhhht4+eWX68r37r77bkaMGEHKSfaBSIiGKrJ7cfvArFNIsOjxerRof2KY9otjv90jzc6FECetwOQ9q0Fp1r9SLspaBMDUzKnNto9mU1MD55wDu3dDt27w3XfQsWOoVyWEECKEOkcaSQs3kFvj4bdCO5M7hbeKLOA1xXaW5mpDnYZ3sDA+tfHrshgsjEobxai0UXWXldaWsjp/NVuKt9CnQx9O73w6YcaGZ4rpFIVB8RZ+LqhlXYmjXQWl6iukQanVq1dzxhln1H1/7733AnDVVVfx9ttvU1BQQE5OTt3/X3311VRVVfHCCy/wpz/9iZiYGMaPH88TTzzR4msXoq3Jr9FqvFPCDegUBa//8jiTDpNOweVTKXV46RDWphIohRAiKGwu7V0xqhn/wptTmcPW4q3oFB0Tuk1otv00C48HLr4YVq+G+HhYvFgCUkIIIbRJfMlWPsiysb7UwX67hzFJVjKijCELTq0qsvNdnhaQGtkxjHEp1qCvJd4az+TMyUzOnNzk+xoQb+HXglryajwU2T10PMnOx0L6aMeNG3fMsjuAt99++4jL7rjjDu64445mXJUQ7VNejZYZlRJ+6MteURQSrXr2VXsoqPVIUEoIcVKy+TOlok3NN9J6cdZiAEaljSIuLK7Z9hN0qgq33QYLFoDFAvPnQ/fuoV6VEEKIVqJzpInTkq0s319LQa2HT7JtJIbpGZNkpXu0qUWDU7/vr+WH/FoARieGcVpy8ANSwRZh1NE9xsT2ChfrSxxM6hQR6iW1qOb75CWEaFVy/ZlSaeFH1vYm+QNRhbVHNvATQoiTgc3fUCrK2PxBqSkZU5ptH83iscfgtddAUeCDD2DUqBPfRgghxEnllCQrN/eJY2THMIw62G/38tnuKt7aXsG2Cudxk1GCZcVBAalT2khAKmBwvFa2t6XMicvbKmbRtRgJSglxEqh2++r6pSSHH5kJlWzVAlUSlBJCnKwqm7l8z+V18W32twBM7d6G+knNmQN//av27+eeg+NMSRZCCHFyCzfqOCM1nFv6xjE6MQyTTqHI7uWL3VX8b1sFf5Q3X3BqeWEtP/oDUmOSwhjbhgJSoPXmijXrcPpU/qhwhno5LUrqdIQ4CeT5s6Q6WPRY9EfGopOs2ltBkd2DV1XRt6
E3cCGECIZA+V5UM5XvLd+3nCpXFR2sHRiSPKRZ9hF0330H116r/fu+++D220O7HiGEEG2C1aDj9JRwRnQMY1WxnTVFDoodXr7cU8VvFj1DEixHPSdprEK7h5VF2pS9sclWxiRZg3bfLUXxNzz/IV9reD4w/uRpeC5BKSFOAvn+flKpRyndA4g16zDrFZxelRK7l0SrvDUIIU4ugUbn0c2UKRWYujc5czI6pQ0kqm/aBBdccKDBuQyVEUII0UBhBh2nJYczokMYq4sdrCq2U+LwssQ/FS/YTku2ckobDEgF9PdP4Sus9VBQ666rZmnv5MxTiJNA3kGT945GURSSwgzsrXZTaPdIUEoIcVLx+FRqPFo5QXNkSnl8Hj7a8hEAUzPbQOlebi5MnQo2G5x2GrzzDujaQCBNCCFEq2Qx6Dg12cqwjhbWFjvIqXYTzCo+RYHeseY2n11kNejoHWvG5T25KlfkzFOIds7rUynw94o6WpPzgCSrPyhV62FgfEutTgghQi9QumfUQZg++B8Cv9j2BXsq9pBgTeD8XucH/f6DqrJSC0jl5UHv3vDFF2A2h3pVQggh2gGLXscpSVZOCfVCWrGz0yPaVC+sYJA/ewnRzu23e/Cq2olWrPnYL/lAXylpdi6EONnY3P4m50Z9s3wQfGbFMwDcPPRmwoxhQb//oHG5tJK9zZshKQkWLYLY2FCvSgghhDhpnGwBKZCglBDtXq6/n1RKuOG4b3LJBzc7951cY0iFECe3ymZscv577u8s27cMk97EbSNuC/r9B43LBbNmwfffQ0QELFwInTuHelVCCCGEaOckKCVEO5fv7yd1vNI9gGiTDotewatCscPbEksTQohWIdDkvDmCUoEsqUv7XUpSRFLQ7z8o7HY4/3z49FMwmeCTT2Dw4FCvSgghhBAnAQlKCdHO5R2UKXU8iqJICZ8Q4qQU6CkV7Ml7OZU5fLL1EwDuGXVPUO87aKqr4eyztcyosDD46iuYPDnUqxJCCCHESUKCUkK0YzaXlyq3DwXqNVI0UMJXUOtu5pUJIUTrYWum8r3nf38er+plfNfxDEwaGNT7DoqKCpg0CX74QSvZW7xYAlJCCCGEaFEyfU+IdiyQJZUYZsBUj4lSkiklhDgZVdaV7wUvU6rKWcXra18HWmmWVEmJFpBatw5iYuCbb2DEiFCvSgghhBAnGQlKCdGO5fr7SZ2odC8gEJQqtnvx+FQMupNv+oMQ4uSiqipVbn+mlDF4mVJvrX+LSmclPeJ7cFb3s4J2v0FRUAATJsDWrdChAyxdCgNbYSaXEEIIIdo9Kd8Toh3L92dKnajJeUCUUUeYQcEHFNslW0oI0f7VeFS8KihAZJDK97w+L8/+/iwAd4+8G53Sij5u7d0LY8dqAamUFPj5ZwlICSGEECJkWtGnJCFEMLl9Kvv9ZXipEfXLlFIUheSwQF8pCUoJIdq/wOS9CKMOvRKc7NCvd3xNdnk2cWFxXDnwyqDcZ1Ds3KkFpHbtgq5d4ZdfoFevUK9KCCGEECcxCUoJ0U4V1nrwAREGXYNKUqSvlBDiZFJZN3kveB+Jnl7+NAA3Db2JcFN40O63SbZsgdNOg337oGdPLUOqW7dQr0oIIYQQJzkJSgnRTuX5+0mlRhhQGvDX/ySrZEoJIU4etiA3OV+Tv4Zfcn7BoDNw2/DbgnKfTbZ2LZx+OhQWwoAB8NNPkJYW6lUJIYQQQkhQSoj2KtffTyrF2rB5Bsn+65c4vLh9atDXJYQQrUkgUyoqSJlSz6x4BoBL+l1CalRqUO6zSTZuhDPPhNJSGD4cfvgBEhNDvSohhBBCCECCUkK0S6qqku/PlEqLqF+T84AIo45wg4IKFEmzcyFEO2cL4uS9PFseH235CIB7Rt3T5PtrsqwsmDQJKipg9Gj49luIiwv1qoQQQggh6khQSoh2qMLlo9ajolcgMaxhmVKKokhfKSHESSNQvhcdhPK9F1a+gMfn4fTOpzMkeUiT769J8v
JgwgTYv18r2VuwAKKiQrsmIYQQQojDSFBKiHYo0E8qyWrAoGv4NCnpKyWEOFnYglS+V+Oq4dU1rwKtIEuqpAQmToS9eyEzE5YsgdjY0K5JCCGEEOIoJCglRDuU5+8nlRresNK9gGSrdjvJlBJCtGdOrw+HV+ud19Sg1Dsb3qHcUU5GbAbn9DgnGMtrHJsNpkyBP/7Qmpl/+630kBJCCCFEqyVBKSHaodxqLVMqJbxhpXsBgUypUocXl1eanbcHqio/RyEOF8iSMusVzPrGfyTyqT7+u+K/ANw96m70uuBM8mswux3OPRfWrIGEBFi6FDp3Ds1ahBBCCCHqQYJSQrQzTq+PEofWIyW1kUGpCKOOSKMOFdgvzc7bvF8KavjvpjJy/MFK0fb8lF/DfzaUsDinmkp/DyTRdIGgVHQTs6QW7FjAzrKdxFhiuHrQ1UFYWSO43XDRRfDTT1rvqG++gV69QrMWIYQQQoh6kqCUEO1MQY0HFe0kK9LY+L/WS7Pz9kFVVTaUOnF6VRbsrcLp9YV6SaKB7B4fq4rsuH2wvtTBq1vLWZRTRYVTglNNZXNrz2FUE94rAZ5Z8QwANw65kQhTRJPX1WA+H1x9NcyfDxYLfP01DAlxo3UhhBBCiHqQoJQQ7UxebdP6SQVIUKp9KHf6qPaPvK90+fghrzbEKxINtbnMiUeFOLOezhFGfCpsKHXy2tZyFu6tolyCU41WGYQm5+sL1/PDnh/QK3puH3F7sJZWf6oKt98O778PBgN8+imcdlrLr0MIIYQQohEaV9sjhGi18vwlWo0t3QtICpOgVHuwt9oFQKRRR5Xbx/pSBz1iTHSLMoV4ZaI+VFVlfYkDgOEdLQxOCGNftZtlhbXsrnKzsczJpjInfePMnJJoJc4Sol5GbVQwyvcCWVIz+86kU3SnoKyrQf72N3j5ZVAUmDMHzjqr5dcghBBCCNFIkiklRDuiqmrQM6VKnV4p+WrDcqq0IOWAeDNDO1gAWJRTjcMjP9O2YF+1h1KnF5NOoU+sGYBOEUYuzozmih7RdIsyoqJlU73+Rzlf76mi1CGB5Pqy+ftzRZkaF8zbX72fDzZ9AMA9o+4J2rrq7amn4NFHtX+/8gpccknLr0EIIYQQogkkKCVEO1Ls8OL0qhh10CGsaRkT4UYdUUbtLWJ/rZQHtUWqqtY1N+8cYeL05HBizVrG1Hd5NSFenaiPdSV2APrEmo+YDpcabuSijGiu6hFNhj84taXcyet/VLC13BmC1bY9tiaW772x9g3cPjej0kYxInVEMJd2Yq++Cv/3f9q/n3gCbryxZfcvhBBCCBEEEpQSoh1ZXaSdwKZHGNErSpPvL5AtVVArU9vaolKnlxqPikGBlHADJr3C2emRAGwqc5JV6QrxCsXx1Lh9bPf/jAYlWI55veRwIzMzorm6ZwxdI7UMyVX+9wJxbF5VpcodKN9reBDf4/PwyppXALht+G1BXdsJvfce3HKL9u/77z8QnBJCCCGEaGMkKCVEO1Hu9LKpTMuOOCXJGpT7TJZm521aoHQvNdyIQacFKdMijIzoGAbAopwq7FLG12ptKnPgU7XXYSBAfDxJVgNnddYmvxXUeqhxy8/2eKrdPlRAr0C4oeFB/K+3f02uLZcEawIz+swI/gKP5fPPtUl7gQbngfI9IYQQQog2SIJSQrQTvxXWogLdooxN7icVUDeBzy5BqbZor790Lz3y0ONhbLKVeLOeGo/K0lwp42uNDm5wfrwsqcNFGvUk+kt3s22SCXc8gcl7kUYdSiMyS19a/RIA1w++Houh/j+jJlmyROsb5fVqgalnn9UanAshhBBCtFESlBKiHShzeNniz5IaG6QsKTgQlCp3+qQxdhtzcD+p9IhDg1JGncLZnSNQgK3lTrZXSP+h1mZPlZsKlw+zXqF3jLlBt83wT1aUoNTxNaXJ+faS7Xyb/S06RcfNw24O9tKO7pdfYPp0cLlgxgx4/XXQycc4IYQQQrRt8mlGiH
YgkCWVGWUiOUhZUgBhBl3dqHTJlmpbShxe7B6t6X3KUUq/UsKNjErUyvi+2VdNrZR6tSrr/FlS/eLMmPQNy4TJiPYHparc+FQ16GtrLwJNzqMb0eT8pVValtQ5Pc6hc0znoK7rqFavhrPPBrsdzjoL5s4Fw4lLOoUQQgghWjsJSgnRxpU4PHWTtk5NDl6WVECaP8iVbZNm521JoHQvNdyIXnf0oMaYJCsdLHpqPSrf5FajSgCjVahye9kZaHAe3/CysGSrAYtewelVyauRYPKxNHbyXrWrmrc3vA3ArcNuDfayjrR5M0yeDFVVMG4cfPIJmEzNv18hhBBCiBYgQSkh2rjfCrQsqe7Rpno1Q26onjHayc+2cqcELdqQQJPzzhHHzpwz6BTO7hyJDthe4eKPCin3ag02ljpRgbRwAx3CGv6a1ikK3aSE74QqG1m+N3fjXGxOG5lxmUzMmNgcSztg506YOBHKymDkSPjqKwgLa959CiGEEEK0IAlKCdGGFds9dYGEU4PYS+pg3aJMmHQKNrePfJnC1yYc0k8q8vjlnElWQ920xiX7qqmWMr6Q8qkqG/yle4Mb0OD8cBlR2s99lwSljsnmP9ajjfX/KKSqal2D81uH3YpOacaPUTk5MGECFBbCgAGwaBFERjbf/oQQQgghQkCCUkK0Yb8V1gJaNlNiM2RJgZZN093fo+aPcmmI3RYU2b04vFo/qfpkz41OCiMxTI/Dq7I4R8r4Qinb5sbm9hGmV+jZwAbnB+vqz5Qqsnup8mcEiQNUVW1Uo/Pf9v3Gxv0bCTOEcfWgq5tpdWiBqAkTtMBUjx7a1L3Y2ObbnxBCCCFEiEhQSog2qsjuYVszZ0kF9Ir1l/BVuCRg0QYEsqQ6hRvR12NcvF7xl/EpkGVzsUWCjyGzrsQOQP94C4Zj9AKrD6tBV9fgXvrBHcnhVQkkBTakp9SLq14EYFb/WcSGNVOQqKQEJk3SSvc6d4Zvv4XExObZlxBCCCFEiElQSog26tcCLUuqV4ypUX1nGqJrpAmzXqHa7SNXGie3envrWbp3sI5hBsb4g5sr9tsl+BgClS4vu/wBpMY0OD9cYAqflPAdyebWju9wg1Lv4F9hdSGfbv0UgNuG39Y8C9uyBUaMgE2bIDkZvvsOOnVqnn0JIYQQQrQCIQ1K/fzzz0ybNo2UlBQUReGLL7447vWvvvpqFEU5Yuvbt2/LLFiIVqKw1sOOypbJkgIp4WtLfKrKvuoTNzk/mqEdLBh1UOLwSvAxBAK9pDpHGImzNKz59tFk+Ev49lS58fokyHiwQD+phpTuvbH2Ddw+N6PTRjM4eXDwF/X11zBqFOzeDd26wfffQ0ZG8PcjhBBCCNGKhDQoVVNTw8CBA3nxxRfrdf1nn32WgoKCum3fvn3ExcUxc+bMZl6pEK3Lr/5eUn1izSQ0c5ZUQG9/f5vtFU58kkXTahXZvTi9Kmad0uA+Yxa9jt6x2s95vT9AIlqGV1XZUNr0BucHSwzTE25QcPlU9tVICd/BqlyBoFT9PgZ5fB5eXfMq0AxZUqoKjz8O550H1dVwxhmwciX06hXc/QghhBBCtEItczZ7DFOnTmXq1Kn1vn50dDTR0dF133/xxReUl5dzzTXXNMfyhGiVCms9ZFW6UIAxSS03GrxLpBGLXqHGo2XidI40tdi+Rf3trdIy6NIiDOjq0U/qcIMTLGwsdbKtwsmZnnCsBqnybglZlS5qPCrhhgNZiU2lKArdokxsKnOSbXPTRV6zdeoypeo5ee+r7V+Ra8ulg7UDM/rMCN5C7Ha47jr44APt+1tvhf/+F4wNy3IUQgghhGirQhqUaqo333yTCRMm0Llz52Nex+l04nQeKDey2WwAuN1u3O62/ZfjwPrb+uMQDfNzfg0AvWOMROnVRv/8G3P8ZEYa2FzhZmuZgxRL45swi+ZTF5Sy6ht1bCQYoaNFR5HDx4biWoYlNH4CXHNpj+99a4u0Buf9Yk34vB58QRqY1z
lcz6YyyKp0MrZj8wal9lV7WFfm5LRECzHmppcfNofAMVPh1J7gCEP9jqMXfn8BgGsGXYNO1QXn2MvLQz9jBro1a1ANBnz//S++G28MLLTp9y+Crj2+94iWI8ePaKvk2BWNVd9jRlFbSTdbRVH4/PPPmT59er2un5+fT3p6Ou+//z4XXXTRMa/30EMP8fDDDx9x+fvvv4/V2vy9eIQIJrspgj1JA0FVyShYi8nTsiVW1ZYY9nXsi97ronveKgJhKVVVKXGXUOgsJNOaSZi+5TK4xAEqsCNtJD6dgS4F6wlz1zTqfsrDEymMz8TkttOtYC0SfmxeLoOFXSlDtdd1/hpM3uD1bfMqenakjQRFISNvdVDv+2AqsDtpEE5TOBZnFV32b2zVx83uxAE4zJGkFf9BpL3suNfd59jHHdvuQIeOV/u8SgdThybvP2bHDkY+9hiW8nKckZGs+r//o7R//ybfrxBCCCFEa1FbW8usWbOorKwkKirqmNdrs5lS77zzDjExMScMYs2ePZt777237nubzUanTp2YNGnScZ+YtsDtdrN06VImTpyIUVL9Twqf7qmBag99Y01M6T++SffVmOPHq6q8sq2Kcredii56csrXsip/FavyV7G/Zj8AqZGpPDv5Wc7tcW6T1icarqDWw7bsGsw6OH/CaY0q3wNweVVe3W7DZQyj32mT6BzRun5VtLf3vp8L7ewqcdE10sj0yWcG/f4/yq4mt9ZL52GnMji+eTLfcms8bNutBUEd5kgSho1nZMfg9MYKpsCxo1gjwQvjRg0nMez4WV33LLkHgLN7nM1V069q8hqUuXPR/7//h+J0ovbti+7TTxnZrVuT71c0v/b23iNalhw/oq2SY1c0VqBK7URa15lGPamqyv/+9z+uuOIKTKbjlyOYzWbM5iM/hBuNxnbzompPj0UcW16Nmz3VHhRgbEoERmNwymNOdPx4fV7WFKxhZd5KVuat5Ps9y8mzZR1xPb2iJ9oSTV5VHjM+mcGFvS/kuanPkRKZEpR1ihPLd2gpsumRJswneG88HqMR+sZZWFfiYHOFm8zY1pn51h7e+zw+lc0V2s9tSMewoDyewupCdIqOWEssRr2RzBgzubW17K3xMiKpeZ6vzRVa1masWUe508eyYic94sLo2EKDGBrCp+iw+8sj460mjMfpm1btqmbOpjkA3DHijqb9fLxeeOABePJJ7ftzz0V57z2MkZGNv08REu3hvUeEjhw/oq2SY1c0VH2Pl9b3abEefvrpJ7KysrjuuutCvRQhWswvBdrEvf7x5hbp1+LxeZi7cS6P/PIIO8t2HvH/8eFdmNBlFKPSRjIidQSDkgahoPCPn/7BU8ue4tM/PmVp9lKemPAENw69EZ0iDbObW06VPygV0fQPDIPitaDUjgoX1W4fEfVsCC0aZkeFC7tHJdKoIyOqaT2ftpds576l9zF/x/y6yyJNkcRY4vAqUViNsXzUIZEEaxxxYdqWFpVGZlwmmXGZxFhiGrXfUnsNP+ZsoKg6m95RFeyyuSh2eNiwT8fIjmFHzdjTKTrGdRlHnw59GvtwG82t155now4s+uNnE7638T1sThvd47pzZrcmZLEVFsJVV8GSJdr3DzwA//wn6OR1JYQQQoiTW0iDUtXV1WRlHci42L17N+vXrycuLo709HRmz55NXl4e77777iG3e/PNNxk5ciT9+vVr6SULERL7qt3sqXKjA05JbN5eaG6vmzkb5/DIL4+QXZ4NQJQ5ijGdxjAydSTDUoazwZaJTh/HxRlRdD3sRPqxCY9xSb9LuOHrG1iVv4pbFtzC3E1zee2c1+jdoXezrv1k5lVV9tUELyiVaDWQGm4gr8bDxlIHpyRJD75gU1WVtSVag/OB8ZZGl1uW1pby8E8P8/Lql/H4PCgoqGjtIqtcVVS5ququm1Vy7PuJD4une3x3LUgVm1kXrMqMy8RisLCrfBc7S3eSVZalbeXa11xb7jHv8/0TrH181/HcMe
IOzulxDgZdy3wk8ei17Okokx7lOM+5qqq8tOolAG4dfmvjA+uLFsHVV0NREVgs8OabMGtW4+5LCCGEEKKdCWlQavXq1Zxxxhl13wd6P1111VW8/fbbFBQUkJOTc8htKisr+fTTT3n22WdbdK1ChNK6Eq00pjmzpFxeF++sf4dHf32UPRV7AOhg7cB9p9zHLcNuIdJ8oMTEsK+adSUO/qhwHhGUAhiYNJDl1y3nhZUv8Nfv/8qvOb8y8JWBPDD2AWafOhuzofVNdGvrCms9uH0QplfoeIIeOfU1KN5CXk01G0odjEo8esaLaBxVVVm8r5rcGg86BQY0oteTy+vixZUv8o+f/0GFowKAaT2m8eTEJ+ke150KRwVl9jLK7GUsycljc2kRCaZqOpprKLOXUWIvIacyh6yyLAqrCym1l1KaW8qK3BUNXovFEEW3uEx6xXfBoDNgc/nIq3GjAF0iTVgMhx475fZyvtv9Hd/v/p7vd39PenQ6tw67leuGXEeCNaHB+28It//9J/oE2X+/5vzKpqJNWI1Wrh50dcN35HTC7NnwzDPa9/37wwcfQN++Db8vIYQQQoh2KqRBqXHjxnG84X9vv/32EZdFR0dTW1vbjKsSonVx+1SyKl0ADIgPfuNgp8fJmxve5LFfHyOnUgsCdwzvyP+d8n/cPOxmwk3hR9ymV4yprrRrcpqKXndksEKv03PXqLuY3ms6ty68lYU7F/LwTw/z0ZaPeO2c1xjbeWzQH8vJbK+/dK9ThPG42R8N0SvWzLd5NVS6fOy2ucmIblp5mdD4VJVFOdVsKnOiAGenRxBlqn8gUVVVvtz+JX9e+meyyrRs4wGJA/jPpP8woduEuuvFW+OJt8YDEBfp5NPsKmJMOm7qE3vEMVLtqmZX2a4DWVD+TKidpTvJq8rT7i8s/pDsqcy4TMLNnVlbkUiMOY47+sdj8pfDqarKF3uq2F7hIsGi5+qeMRgOe5/Iqczh5VUv8/ra18mpzOH+7+7n7z/+nVn9Z3H7iNsZkjykwc9tfQSCUid6zl9c9SIAl/W/rOGljdu3w6WXwrp12ve33w5PPaVlSgkhhBBCiDptsqeUECeTbJsLl08lyqgjxRq8l6zD42BB8QJue/m2upPOpIgk/jLmL9w49EasxmOXa3WKMBJuUKjxqOypOn6wonNMZ+ZfOp95W+dx56I72VayjdPePo2L+l7ElQOuZFLGJIx6aZrYVDnVWlCqc2TwnkujTqF/nJnVxQ7WlTokKBUEPlVlYU41m/0BqWmdI+kTV/8sqXUF67h3yb38uOdHABLDE/nX+H9xzaBr0OuOHWTpHGFCr0CFy0eZ00u85dD3kghTBAOTBjIwaeARt6111+Lyuo4amPlit41wk4t+8Za6gBSAoihMTotgX3U5JQ4vvxbWMi7l0AB3enQ6j014jL+P+zsfbf6I51c+z5qCNby1/i3eWv8Wo9NGc/uI25nRZwYmffCOPXdd+d6xM6X2Ve7j0z8+BbTSvXpTVXj7bS0IVVsL8fHwv//BuTKNVAghhBDiaKTDphCt3LZyJ6BlrQQrA6bKWcXot0bzet7r5FXlkRqZyvNTnyf7zmzuHnX3cQNSADpFoWeMdmL3R4XzhPtTFIWL+l7EH7f9wfWDrwfg4y0fc84H55D8n2RuXXArv+b8ik/1Nf3BnYS8PpXc6uD1kzrYoAQts2NXpQubyxvU+z7Z+FSVBXsPBKTO7VL/gNT+6v1c++W1DH1tKD/u+RGz3swDpz7Azjt2cv2Q648bkAIw6RU6+Y+NXTZ3g9ZtNVqPGpCqdvvYUaFlcQ46Shan1ahjcqcIAH7fbyev5uj7tRgsXDXoKlbdsIrl1y3nsv6XYdQZWZ67nMs+u4x+L/Vj4/6NDVrz8RzIlDr6RyCX18Wln16Kx+dhbPpYBiUNqt8dV1Ro2VHXXqsFpMaPhw0bJCAlhBBCCHEcEpQSohVzeVWybNpJX+/Y4GQKqKrKTfNvYkvxFqIN0T
w/+Xmy7szi9hG3E2YMq/f99I7VTux2Vrrw+I5dhnuw2LBYXj/3ddbcuIY7R9xJx/COlNpLeXn1y4x9ayzdnu3G7G9ns7loc6Me28kqv9aDRwWrQSHBEtyeYwkWA50iDKjAhlJHUO+7LVFVlT1VLn7Kr2FvlavBt/epKvP3VrOl3IkOOK9rZN1r6ES2l2xnxBsjeGv9W6iozOo/i+23b+eRMx85pNfbiQSm+2XbGr7+o9lU6sAHpFgNJB4ji7NnjJm+sWZUYMHeatzHea9QFIVRaaN474L3yLknh3+M+weJ4YnsLNvJqDdGMWfDnKCsO5ApFX2M8r37ltzHb/t+I8ocxRvnvlG/O122DAYNgo8+Ar0eHntMm7SXmhqUNQshhBBCtFcSlBKiFcu2uXD7INqkIyksOKV7b6x9gw82f4Be0TO762xuGnoTFkPD+5ykhRuIMOpwerUSvoYYkjyEZ6c+S969eXxz+TdcNfAqIk2R7K3cy+O/PU7/l/sz4OUBPP7r42wr2YbH52nw+k4mOQdlSQUrm+5ggxO0YOWGUie+4/QBbI9UVWW3zcV7Oyv5MMvG8v12PsiyMXdnBXuqXMftixjgVVW+2lPF1oMCUr1i6heQWpW3ilPfOpWcyhx6xPdg+XXLmXvBXDrHdG7wYwkEpXKq3bi8Tfs5+lSV9f4g5eCE479/TEwLJ8Kgo8zp5ef8mnrdf1JEEv/v9P/Hllu3MDljMnaPnSu/uJJb5t+C03Pi7MxjUVX1uJlS7218j+dXPg/AnPPn0CO+x/Hv0OuFRx6B006DvXuha1f47Te4/34tOCWEEEIIIY5LglJCtGKB0rjeMcEp3du4fyN3Lr4TgH+O+ye9wns1+r4URaFXjHaS+0d5404SDToDkzIm8fb0t9l/334+nvEx03tNx6Q3saloE7O/m03vF3sT9kgY3Z/vztS5U7lj4R08u+JZFuxYwPaS7bi8wcn6aMsCTc6DXboX0CPahNWgUO321TXdb+9UVWVXpYs5Oyr5aJeNvBoPegUyoozoFdhX7eHDLBtzd1ay23bs4FQgILWtwoVOgfO7RdaVvp7It9nfcsY7Z1BSW8KwlGH8es2vjEob1ejHFGvWEWPS4VNhTyOyvQ622+am0uXDrFfodYKML4tBx5R0rYxvVbGDfdX1D2LHW+NZMGsBfz/97ygovLLmFU57+7S6oQwNVeNRQdGhAJGHTd9bX7ieG7++EYC/jf0b5/Y8QdldYSFMngx/+5sWnJo1C9avh5EjG7U2IYQQQoiTkTQ6F6KVcnm1k2LghCd99VHtquaieRfh8DiYmjmVe0fdy+JFi5t0n71itCbYgRK+w6drNUSYMYyZfWcys+9Myu3lfPrHp7y/6X1W5K7A7rHXTQQ7nE7RkR6dTp8OfXjwtAcZmXZynRB6fGpdr55gNjk/mEGn0D/Owu9FdtaXOOhRz6BKW6SqWsnsb4V2Cmu1DD2DovXWGpVoJcKow+by1j0XuTUePtplI8VqYEySlW5RB7LVvD6VL/dUsaPShV6B87tGkVnPZvHztszjss8uw+1zc2bXM/n84s8bVKp3NIqikBFtYk2xg2ybu0k/x0CWVP84M8Z6vO4zo030jzOzqczJgr1VXNsr9pDG6Mej1+l5aNxDjEwdyWWfXcbKvJUMeXUIH1z4ARMzJjZo3Ta31rcuwqigOyjQX2Yv44KPLsDusTMlcwoPjXvo+He0dClcfjkUFYHVCi+9BFdeCc2QqSiEEEII0Z5JUEqIVirL5sKjatkNiWFNKwNRVZVbF9zK9tLtpEam8u7576JTmp4omRpuIMqow+b2kW1zBS1YERsWy/VDruf6IdfjU30UVBUcdVR9VlkWNe4a9lTsYU/FHr7J+oYHT3+QB8Y+gEF3cry95dd48KoQblCIMzdfudCgBC0olV3lpsLpJaYZ9xUKqqqys9LFb4W17LdrDd2NOq10cUTHMCIOyqqJMumZmBbB6EQrK/bXsr7EQX6th3
nZNpL9wakukUa+2FNFlj8gdUHXqHpPL3xl9SvcuuBWVFRm9JnBe+e/h9kQnNdWRpQWlNrlz+5qTAamzeWty5gbdILSvYOdmRbO3io3FS4fP+bXMMnfBL2+pnafytqb1jLj4xmsKVjD5Pcm888z/snssbPr/X4WCEpFHfTz9Pq8XPbZZeyu2E3XmK7MvWDusRvHezzw4IPw+OPapL3+/eHjj6FX47NOhRBCCCFOZifHWZsQbVCgJC4YpXtvr3+bORvnoFf0fHDhByRYE3C7G9YH6mgURaFnjIlVxQ62VQQvKHUwnaIjNSqV1KhUTu9y+iH/p6oq+2v2s2H/dv6z4kWWZs3j7z/+ncVZi3nvgvfoFtst6OtpbfZWa8GBzpGmZuknFRBr1tM10sjuKjfrSx2MSwlvtn0Fg6qqbCl3sr/2xP3IVLQSyGLHgWDUEH8wKtx47GBHhFHHhLQIRiVaWVlkZ12JnYJaD59k27DoFRxeFYMCF3SLolvUiQNSqqryr5//xYM/PgjAzUNv5oWzXjjhZL2G6BRhxKBAldtHscNLx0b0qttQ6kAFOkUYSLAcdHtVBbsdTCYwHHm/Fr2OqekRfLTLxtoSBz1iTHSJbNgAhy4xXfj12l+5c9GdvL72df72w99YkbeCd6e/S2xY7AlvX+XSyiwPLt17+KeHWZy1GIvBwmcXf0ZcWNzRb7xvnzZd77fftO9vvhmefhrC6j8gQgghhBBCHEqCUkK0Qk6vr25CVlNL97YUbeG2hbcB8I8z/sHYzmObvL6D9Y41s6rYwc5KJ25fRL1KeYJFURSSIpJYWRLOGb1fZHDqZF5ZcS/Lc5cz8JWBPD/1ea4aeFWzBmtC7eAm581tUIKF3VVuNpY6GJtkRd+CP+uGUFWV7/NqWFXcsGmBJp3C0A4WhncMw2qofyZhhFHH+NRwRnYMY1WRnTUl9rqA1IxuUXSpR0DKp/q4e/HddU22/99p/4+Hxz0c9GPXqFPoHGlkl81Nts3VsKCU3Y4vJ4fSn7fQPy+X4c4SKM7XgjU5OdrXGn8jc4MBLBYtYHPQ165hYdyoGCnXm6nt1BnfKYPQ9e0LffpAYmK9yt8sBguvTXuN0WmjuXXhrczfMZ+hrw3l04s+ZXDy4OPedl+NFqSM8Tc5/2r7V/zz538C8Pq01xmUNOjoN/zqK7jmGigrg6goeOMNmDmzfs+bEEIIIYQ4JglKCdEK7ax04VUhzqyng6XxWRI1rhou+uQi7B47kzImcf+p9wdxlZpkq4Eokw6by8cum6veU8WCaV+1G0VRiI85j1+uHcftC67hl5xfuObLa1iwcwGvnP0K8db4Fl9Xc3P7VPL9J9ktEZTKjDah81Wxo2QXz67az/CkdEaljcKob/5915eqqnyXV8Nqf0BqULwFi+HEgQ6rQUf/ODNhDQhGHS7cqGNcajgjEsPYUuakU4SRJOuJf826vC6u+fIa3t/0PgDPTnmWO0fe2eh14HZDVRVUVx/61f/vkfmlJOSXE+6pBYsPHA4tw+l4X8vKoKQEHTC9PmvweLT9Vlcf8V9x/g2ADw/6j9hYLTjVfTsz/gAAWIZJREFUpw8EAlV9+kBKylGDVdcMvoZBSYO48OML2V2xmxFvjGBmn5ncPuJ2RqeNPiKgl1/jZne1B1SVPjFGdpbu5IrPrwDg9uG3c/mAy498HE4n/OUv8Oyz2vfDhsFHH0G39p+FKYQQQgjREiQoJUQrtK1cy5LqHdu0kqw7Ft3B1uKtJEckM+f8OUHpI3U4RVHoHWPm9yI728qdLR6Usrm8VPn7xPhUyHck8sNVP/Dkb0/y4I8P8snWT1i2bxnvTH+HCd0mtOjamltejRuvqpUixZqD87NVVZUye9kxe3iV2ksPuX6UOYoJ3SYwJWMKUzKn0Cm6U1DWcSI+1Ue5vZyqqirK7GWU2csotZeyLL+Q7eXF2N3lxBqr8aX24ZZht9SrtCtYrAYdwzvWr6SrxlXDjHkzWJy1GIPOwD
vT32FW/1nHv5GqapPfsrO1bffuA//Ozob8fO06x5Du3xrDbQ2nMjEVXXon4rp3hU6dID39wNfERC0o5nAcPbjl//fOvFKKN20nZe9OOu/bibJrF5SXa6VxgfK4gNRUOO88mD4dTj9dKw/0G5w8mDU3ruGaL6/hy+1f8sHmD/hg8wcMSR7C7cNv55J+lxBm1H4WvxbWAhBdU4RRMXD+R+djc9oY02kM/5n8nyMf7K5dcPHFsGaN9v0992i9pEwNKzkUQgghhBDHJkEpIVoZh8dHtn9ce1MCPHM2zOGt9W+hU3S8f+H7dAzvGKwlHqF3rBaU2mVz4fKq9Z6qFQx5/kyhQA+fjaUORiWGMXvsbCZmTOSyzy5jR+kOJs6ZyD2j7uHRMx/FYqh/c+bWLKfqQOleU8u87G47z6x4hv+u+C/FtcXHvW6kuSOx1s7UOHZTai/hsz8+47M/PgOgT4c+TM2cypTMKYxNHxuUBt0ltSWsylvF73m/szJvJWsL1lJUU4S64diBl4D52+HxXx/nzpF3cs+oe1pVxtyyfcu4feHtrCtcR5ghjE8v+pSp3aceuILLBdu3w4YNsHEj/PHHgSCU3X7iHZhMEBmpbRERh3zd4TNTZQqjS2IM8bERRy21q/tqsUB0NJWJqbycr4KicFOfWGhCs/t0r48FW8pxeFWmdY6gb5gKO3bA1q2Hbjt3Ql6eNt3upZcgOhrOPhvOPx8mT4bISGLDYvniki9YW7CWF1e+yPub32dtwVqu/epa/rz0z1w/5HrO73M92bYYFCC+ch83LXiYLcVbSIpIYt7MeZj0/kBTQQEsXAjz58M332jPc1wcvP02TJvW6McrhBBCCCGOToJSQrQyOytd+FRIsOjp0IgmxADbSrZxy4JbAPj76X9nXJdxQVzhkRLD9MSYdFT4S/h6N7EPVkPk1WiBmT6xZsqcXvZUuVm2v5az0iMZljKMtTeu5b4l9/HKmld4ZsUzfJv9LR/N+IjeHXq32BqbS10/qcjGl8+pqsqHmz/k/u/uJ6cyp+7y1MhUMuMy6R7Xncy4zLotIy6DRfu87LK5GZpgIka3ncVZi1m8azErclewtXgrW4u38p/l/8FqtHJGlzMY02kMHcI7EBcWd8QWZgg7JKBmd9tZV7iO33N/Z2X+SlbmrSS7PPuY6w83hhMXFofREAO6aKzGGHrGdiQztgMRpgg+3vIxm4o28cgvj/Ds789y+/DbuXf0vXQI79Do56ypdpfv5v7v7ufjLR8DEGuJZcnEOQzbrYcv/60FoDZs0IJQxxpIoNNp2Unduh3YunbVvnbpopXCHSejJzevhpVFdvrEmpnWOaJeQc31+TWg2OkSaSS2idMXzXodIzqG8XNBLb8W1tK7dyy6gQNh4MBDr+hwwPffwxdfwJdfQlERvP++tpnNMGGClkE1bRpDkofw5nlv8uTEJ3lz3Zu8tOol9lbu5YnfnuDJZU/RO3EKs/rfxKKCz5mXPw+DzsAnF35M8o58mP+aFohavfrQ/Z96qravTi2TASiEEEIIcbJRVPU4Of7tkM1mIzo6msrKSqKiokK9nCZxu90sXLiQs846C6Ox9fR0EU0zb1clu2xuTk2ycmqytcG3t7vtjHxjJJuKNjG+63iWXL7kqNO7gn38/JhXw4oiO31jzUzrEtnk+6uvd7ZXUFDrYVrnCGLMeubsqEQBbuwTe8iJ8/wd87n2y2spri2mU1Qn1ty4JqSBiaaqdHl5dUs5PuDmPrHENCJIsHzfcu755h5+z/sdgE5RnXjszMc4v/f5WI3HPvayKl11E+Zu6xdX19y+3F7O0uylWpAqazEF1QUnXINZbyYuLI54azwKCluLt+JVvUdcr2d8T0akjmBE6ggGdxzMztU7mXH2DMIt4SzaV83GUicKcHbnCPrFHciE86k+vtz2Jf/4+R+sL1wPgNVo5dZht3LfKfeRGJHYgGesEVRVK0vLz6dmbxYLfn6DDesW09HmJaUaBqmJdC
v2oi8uOfrto6JgwAAtWNOvH2RkaIGn9HRowut2b5WLD7JsgNYXbkySlYyoY2fceX0qL24po9ajcn7XSHoGoUzX6fXxypZy7F6Vs9Mj6B9/ggxGrxd+/x0+/1zbdu068H+Koj1HKSnQoQN06IAvIZ4N3nw+KPqZH2s2UBwOxf7D+szd8Kh9NH1X7tGyow42fDicc462DR5cr+br4uQgn7tEU8jxI9oqOXZFY9U39iKZUkK0InaPj902LTOid2zj+pbctfguNhVtIjE8kbkXzA3qOPnjSY0wQBEUOzwtsj/QGn3vr9X2lxpuJMasp1ukkewqN8sKazm784Hg2Dk9zmHTLZs47e3T2FG6g0s+vYRvLv8Gg65tvg0uK6zFB3SOMDY4ILWnYg/3f3s/H235CNCyjWafOpt7R99b13/neLpFGYky6rC5fWyvcNYFgWLDYrmo70Vc1PciVFVlU9EmFu1cxNaSrXV9nw7ePD4PTq+TguqCQwJYieGJjEwbyYgULQg1LGXYIT2h3G43JRtKMOpNLMypZlOZFpA6p3MEfeMODWzoFB3n9z6f6b2mM3/HfB7+6WHWFKzh38v/zYurXuTmYTfz51P+THJkcoOew0PY7ZCVpZWfBbadOyE3V+v95HRqzzNwkX87YL/2RVGge/cDAajA1/T0ZgmKpEcYOTXJyor9tRTUevgk20ZimJ4xSVa6Rx/Zy25HpYtaj0q4QSEzOjg9lcx6HSMTw/gxv5bfCmvpG2dGd7zHqtfDKado25NPauV9X3yhbatXw/r12uanAwb7t6Nbrn2JiIBJk7Qg1NSpkJTU9AcnhBBCCCHqpW2ejQnRTu2odOEDOlj0xFsa9vKscFRw+8LbmbtpLgoKcy+YS1JEy51cJfjXW+rw4lPV459cBklhrQcfEG5QiPaPeD812Up2VSWby5yMTrQSd9D0wsSIRD6/+HNGvD6C73d/zwPfPcCTE59s9nUGW4XTy6ZSLdAxtgHZdDanjcd+eYxnVjyD0+tEQeHawdfyzzP+2aCgjE5RGJhg4ZeCWtaXOA7JTApQFIUBiQMYkDjgqPehqirVrupDmpQ7PA4GJA6gU1SnE5aTqcA3eXa2VrhRgGldIulznLJRRVGY1nMa5/Q4h0VZi3j4p4dZmbeSZ1Y8w8urX+aqgVdxRpczGJE6gi4xXY7cf1UV5OTA3r1awOngAFROztF3epAyC+RHQmWclc69RpLaczhKSgokJ2vldv36gbXhmZGNpSgKpyZbGZxgYWWRnbUldvbbvXy2u4qO/uBUj4OCU+tLtGmGA+Mt6IP42h6SEMbKIjsVLh+by5wMOFG21IEHoE3o69sX/vpX2LcP1q2D4uJDNntBERV5hVjLS4mqLEXx9+Lyde2K7txztf5Up52mlQIKIYQQQogWJ0EpIVqRbeVaoKGhPZl+3vszV3x+BTmVOegUHU9Pepozu53ZHEs8phiTDoMCHhUqnL5DgkHNJdBPKjX8QNlRSriRjCgju2xufiusPaKUsE+HPrx13ltc9MlFPLXsKYanDGdm35nNvtZg+s2fJdU10khaxInTqL0+L2+ue5P/98P/o6imCIDxXcfz9KSnGZg08AS3PrqB8RZ+Laglt8ZDkd1Dxwb2P1MUhUhzJJHmSDrHdG7QbX2qSn58D2z+gNR5XSLpVc/XjKIonNX9rP/f3n2HR12mbR//Tp900khC6E2KGAJYAAURaUpTQQSVVVx3Vx9fO7jigiKubdXHvvrYxbqrAmtDsbAKUqQJUg29hECA9GQy5ff+MWQUaUkmyWSS83McOTTJzOQeOHOHuXLd149h7Yfx5ZYv/cWpHYv54qsX2ZD/Ih/nQ+fSSDLdSXQodpB62E3UvkOY8wtO/sBxcXDaaRgdO1LethV7UiN5NudjPspbwr5oiIpN4L7+9/GXXn/BZqk/re9RNjMD0qM4OyWCH/eXsuJAGftLvczeVkiy01+cSoqwsKPI/2edkVSzFwmwW0yc3TSCb3
/TLVWtoleLFsed+/TRL3nsKvKQmeRkSIto3Hl5fD13LgPHj8esq+iJiIiIhJyKUtLolXl87Cxy0zbWjtUcutkhJR4f249cTa2yV90r95Zz77f38siiRzAwaBvflrcueYveLXrX5lKPy2Qykei0kFPq5UCZp46KUhVH947eys5Li2JLQR7rD7vokxpxTNfZ2K5jmbx3Mv/44R9cO/daujbtSpfkLtVaw4FSD4VuH21igr8CXmUcdnn5+ZC/eHmqmWMen4d3177LA98/wOaDmwHomNiRxwY9xvCOw4Nab7TNTMcmdjbllbM6t4zBLaKr/VhV4TMMPttdSkFUMmZgVGXnGxkG7N0b6G4ybd7MkM2bGbz5IMZWC2bPb+dYlQDHdj8VR9koTk3iYHo8u9Oi2JFsY3MirIt3s9VSwKGy7RwqXYnH54H9gAlsiTZuOusmpvWbdtQRxPom0mqmf7Mozmr6a3HqQJmXOdsLsfmbEGkbayPOXvPf1z2S/d1S+eU+1h500b2GCl87CsvZVeTBYoLeKUeOpUZF4WrSRHOiREREROoJFaWkUSty+3g3K5+DZV5aRdsY0y42MLS5rm3OK8fAfyW7yhR0NuZu5MqPrmRl9koAru1+LU8NfYoYR90NGf+95AgrOaVecsu8nFbLX8swjKM6pX4rNdJKhzg7v+SXs2hfKSOPM3j9wYEPsnzvcr7d/i2XvH8Jy/64jDhnXKW/fk6Jh0X7SticXw7A0BbRNfZi+mQW7SvBANrF2o553hXcXjdvr32bv3//d7IOZQGQEJHA9H7TueHMG7BbaqZDpHuik0155aw75OL8ZlHYLbX/vbPyQBmb8t1g+BjeKvr4BSmfD1auhC++8F/JruKYXUnJMTc1HXnD4YAWLfC2aM6hxEi2xnpZa8/jB9Mulpj2sisOihxuIPvI22/kHbsEh8XBxR0v5uGBD9MhsUPQz7uuRFjN9KsoTh0oZfmBMlxe//VQMpNOPW+sOmxmE+ekRPL1nmJ+2FdCtwQHliD3YcMw+D7b//edkegkthaKaSIiIiISPBWlpNEqdHt595cCDrn8HRI7itz8e0sBY9rG1smL69/bcOTo3qm6pAzD4IXlL3DHl3dQ6iklISKB/xv+f1zW5bK6WOZJJR0ppuWW1v6w87xyHyUeA4vJX4T6vXNTI/klv9zfLZUSQdLvjpdZzVbeH/M+Pf+vJ5sPbuYPc/7AR+M+wmwyn/Tr7ivxsHBfCVlHilEVvtlTTOuYqg8dr4qDZR7WVXRJpR7bJVXuLWfWT7N4cOGDbD28FYDEiETu7HMn/3Pm/9R4wbJ1jI0mdjN55T425LnIqOw8oGpy+wwW5/gLDSmHt9GhW49fP5mTA19+CfPm+f+be5yr2Vks/ivXdezof+vQ4df/T08HsxkLkHzk7Wzgj8Ch0kMs37ucpbuXsj53PdG2aBIjE0mISDjhW4Q1ok4652qL02rmvLQozmwawercMgzDXwitLd2TnCzNKaXA7WPNobKgC2DbC93sLj7SJZVaO8U0EREREQmeilLSKBWWe3knK5/DLh+xNjPnN4ti3q4idha5+ffWfMa2javTwlSx23+EEE4+TyqnKIfr/nMdn/7yKQAXtr2Q10e9Tnpsep2s81Qqhp3nlnlPccvgVXRJpURYj3vsMiXSSsc4O5vzy1m4r4TRbY69DGlyVDIfXv4h5752LnM3zeXhhQ8z9bypx/16e4v9M6q2HLk6ogn/31XvlAi+2FXE7mIPn+0sYnz72ForRvywrxQDaB9rJ+03XVLl3nJeW/UaDy18iB35O/zPLTKZyX0mc8OZNxBtr52jdSaTicwkJ9/uLWFVblmtF6VW5ZZR7DGItZlIyNuD6bsi+PprfyFq1aqjbxwTAxdeCH37wmmn+QtPbdpANS5lnBCRwOB2gxncbnANPZPw4bSYOSel9gew28wmzkmN4Kvdxfywr5RuCc5qH6c2DIOF+/zFy8wkJzE2dUmJiIiI1FcqSk
mjU1Du5Z1f8skr9xFnNzO+fRxNHBbiHGb+lVXAriIP/9qSz9h2sTgsJ++aqSmb8lwY+Dt+TtRp88nmT5g0dxIHSg7gsDh4+MKHufnsm0/Z2VOXKjqlDrq8eA2jRq/S9Xsnmif1W+emRbI5v5yNeeUnHMZ9ZvqZPHfRc1z/8fX87Zu/0atZr6OKD3uK3SzMLmFb4a/FqK4J/mJUxayqi1vF8OrGw+wscrMyt4yeyTXfmZFb5mHd4aNnSbk8Ll5Z9QoPL3yYXQW7AEiNTmVKnyn8udefibTVfjGhW4KT77JL2FfiYV+J57hda0ErK8O9fiOH5i/jvF820nXXJqK+/xbrkSupBfToAUOHwpAh0Lt3tQpQEjrdE/3dUoVuHz8drP730bZCN3uKPVhN1ElBTURERESqT0UpaVTyy728+5uC1IQOcYHBvelRNsa1j+X9LQXsLvbwry0FXF5HhamNef6jYJ2bHDvrx+11M2X+FJ5c+iQA3Zp24+1L36ZbSrdaX1dVxdnN2Mzg9sHhMu8xR+ZqUmCe1EmuPtc0wkqnJnY25pWzaF8JlxynWwrgjz3+yLI9y3hp5UuM/3A8K/60Aos1nUX7SgLD503A6QkO+qRGEv+7wmG8w8L5zaKYv7uYBXuLaRtrP+Y2wVp0ZD5Ohzg7KREW3ln7Dnd/fTc78/0DuZvFNOOuvndxfY/ribDV3XGlSJuZ05o4WH/YxarcUoa1DOKIYEkJbNwI69bB+vW/vm3dis3nY+jvbm4kJWEaMsRfiBo0CFJSgnouElpWs4neKRF8ubuYxTmlZCRWvVvqt7OkMpOcRNvqT9FeRERERI6lopQ0GnkuL+9m5ZNf7qPJkYLU74ffNouycUX7WN7LKmBPsYf3swq4vH0szlosTBX95uje7y9rv7dwL5f/+3IW7VoEwG3n3MaDAx/Eaa39gdrVYTKZSHJayS7xkFuLRSmX18eBUv8RwZN1SgH0TY1kY145m/LKySnxkHKCTp5nhj3D6n2r+XHvjwx4cxST+nyCzRKBGeiW6KB3SuRJ50X1SPIP/d5Z5ObTHYVM6BCHuYY6xQ6UethwpHDp8K6m9ytTWLpnKQDpMencfe7dXNfjupDlonuSk/WHXaw/7GJAetSJv198PsjOhm3bYOtW/1vF/2/bBnv2nPBrlMU24UDb04g843SanNGZ74G+N92EzVG5K1VKeDgj0cmSI7OlVueW0atp1QqsWwrcZJd4sJnVJSUiIiISDlSUkkYhz+U/slfg9hHv8B/ZO9HVmNIibYxvH8d7WfnsLfEXpsa1i8VprZ3C1KY8/5GsZpHWoy63vmD7AsZ9MI79xfuJdcTy5ug3GdVpVK2soSYlOS1kl3g4UOahE7VTMMgu8WAAsXbzKefFJEdY6dzEzoY8/2ypy9oe2y1lGAbZpSauOvNV1n8+gO2H1zBnzZ08MPBleqeevBhVwWQycVHLaF7dmMfuYg/LD5RxVhVfUJ/Iwn0lHC7ZyaKsv3P39o8AiLZHc/e5d3PbObfVaWfU8bSIspLotHCwzMv6Qy56VBy78vng5pt/LTpt2wYu18kfLDkZunSBrl39/+3SheVJrfnKHU2808L1nePxejzkf/YZmNUF09BYzSZ6p0bwxa5iFueUkJHkrPQVUQ3DYOGRLqkeSRFEqUtKREREpN5TUUoavMMu/5G9ArePBIeF8R1iT1nISI20BgpT2SUe3ttSwBW1VJgKXHXvSJeUYRg89sNj3P313XgNL2eknMGHl39I+4T2Nf61a0PgCny1OOw8ME+qkvOLzk3zd0v9kl9+1NwjwzDYXugfYL672AOkcmWv/+PlxWNYuetfbNh7NkNb3lbpdTVxWLgg3T80/797i2kXawvMnaqurMOHeOqHGSza+iIenwsTJq7LvI6ZF8wkNTo1qMeuKSaTie6JTr7eU8yq3DIyk5z+Ye9mM/z737B//683tligZUv/VfDatvUPH6/4/7ZtITHxqMd2eX0sXHcYTA
Z9UyMxm0zU/hh9CaUzEpwszimloNzHqtzKF3ezCsrZV+rvkjq7hgrCIiIiIlK7VJSSBu1Qmf/IXqHbR6LDwvgOcZWeMZISaWV8hzjezcpnX4mHd7PyuaJ9HBE1WJgqLPceKYZApyZ28svyuXbutczeOBuAq8+4mheGv1AnA6trSnJE7V+BrzLzpH4r0WmlS7yDdYddfJ9dzJi2sWwt8Bej9pb4//wtJv8xtBu7jqRD1CNMnj+ZO768g3fWvsP0/tMZ0XFEpa6ql5HoYFOei22Fbj7ZUcTVHat3jM/j8/DKyleY8tXfKHDlAnBBmwt4YvATZKRmVPnxalu3BAf/3VvMgTIve0s8pFdcHfDee/0DxyuKTi1agLXyP3pWHCijzGuQ4LDQ5SRXppSGw2I20Tclks93FbEkp4Tuic5TXg31t7OkeiZHEKkuKREREZGwoKKUhJ3DLi/5rlMXPNyGwRe7iily+0hyWriifeULUhWaRliZ0N5fmMop9Re4BjSLoqauKbelwD8nKD3Kys68DVz6/qX8cugX7BY7Tw19ij/3/HOlCiH1SUWn1OEyLx6fUe3Lup+IYRiBTqnmUZW/ulrf1EjWH3axpcDNqxvzOHCkaGY9Uow6JyUykI87et9BcXkx//jhH6zIXsGo90bRPbU70/tNZ1SnUSe94qHJZGJYy2he2ZhHdomHpTml9E6tWlFx/pb53P7l7fy8/2cAkqLa8eTQx5jQdVS9zYPTaqZTvIOfD7lYlVv2a1Hqxhur/ZhlXh9L9/uvsNc3NaLGZnRJ/Xd6ooMfckrIL/fxfXYx7WKPvQjEb+WUethf6sVuNqlLSkRERCSMqCglYeWwy8vLGw7jNSp/n+QjBanqzhdJ/k1han+pl/e3FFTrcU5m+/7Z/OWDmyhxl9AitgUfXP4BZ6WfVeNfpy7E2Mw4zCZcPoNDLi9Na3jY+cEyLy6vgdUEyRGVv8JdgtNC1wR/0eRAmRebGTKTIjiracQxxUqTycS959/LjWfeyBOLn+DZH59l9b7VXPqvSzkj5Qym9ZvGpZ0vPWFxKtZu4cL0KD7dWcT3+0poF2ev1J/DhgMbuHP+nXz2y2cARNmbMKDjZK7p/mcua5d4inuHXmaSk58Pudh42MXA9KiguwqX7y/D5TVIdFrorC6pRsViMtE3NZLPdhbx44EyfjxQVqn79Up21mg3q4iIiIjULhWlJKyszi3Da0CE1UR0JV54JDgtDGkeHfRRjqQIKxM6xPHVbn/nVU1xe8uYs/Zevtj8EgCD2g7incveISkyqca+Rl0zmUwkRVjYU+y/Al9NF6X2HDlulxZlxVLFzpn+aZGUenwkO62c2fTUg5CTo5J56MKHuLPPnTy55EmeXvY0a3LWMPbfY+ma3JVp/aYxpssYLOZji2OnJzjYlFdOVkE5n+4oZOJpTU643tySXO5bcB8vLH8Br+HFarZybeaNpCbfRJQ9nvPT46r0PEOlWaSVphEW9pd6+fmQizOD6Fgp8/j48YC/S+rcI7OkpHE5PcHBjkI3+0s9lbp9rN1cYxcXEBEREZG6oaKUhA2Pz2DNQf9vyy9qGU2HuLrtnEh0WhnXvurFAZfHxfa87fxy6BeyDmUd9bY9bztew3+M7G/n/Y37zr/vuAWOcJPkPFKUKvVADXe47Ck6Mk+qCkf3KsTYLYxtV/W/w8TIRGZeMJPbe9/OU0uf4sklT7LuwDqu+PAKOv+3M3f1vYuRp40kPiI+cB+TycTQltG8vOEwOaVefthXwnlpUUc9rsvj4tllzzLzu5nku/IBGHXaKB4d9Cg/5aWwpcBN1wRH0MPS64rJZCIzyckXu/wDz3slO6t93HDZgVJcXoMkp4VOTU5+dEsaJrPJxIjWMaFehoiIiIjUovB4pSMCbMpzUeo1iLGZTzlfJFQMw+CnnJ947+f3WJG9gqxDWezM34nPOHF3VUpUCi+PfJnhHYfX4UprV5LTCrhqZdh5RadUelTdb1/xEfHcd/
593HrOrTyz9Bn+d8n/siF3A9fMvQaLycI5zc9haPuhDG0/lB5pPYi2mRncIpr/bC9k8b5SOsQ5SI20YhgGszfOZsr8KWw5vAWA7qndeXzw41zQ5gL2FLvZUpCPCf8srHDSJd7BN3uKOeTysqvIQ8uYqhcPSz0+lu/3F6DPTYust3O0REREREQkOCpKSdhYlet/kZqR6Kx3R3m2HNrCuz+/yztr32FD7oZjPh9tj6Z9Qnv/W7z/vx0SO9A+oT1p0WkN7kV3xbDzmi5KlXp8HDzymNXplKopTZxNmNZ/GreccwvPLnuWt9e+zfoD61m0axGLdi1i2rfTSI5MZkj7IQxpN4RUZx/2lcXy6Y5Cmjs2c9+3k1my+3sAmkal8tfz7ufyrhOxmC3sK/Hw3V7/VcROT3AQ7wivzjmHxUzXeCerD5axKre0WkWpZftLKfcZJDstnBZXPwvQIiIiIiISPBWlJCzklnrYXezBBGQk1o+BxzlFOby/7n3eWfsOS/csDXzcaXUyouMIhrUfRsfEjrRPaE/TqKYNrvB0MslH5kgddtXsFfj2HrnqXrzDTGQ9GGYc64hl6nlTmXreVHbm7+SLrC+Yt2Ue87fM50DJAd5a8xZvrXkLEyaaN8kgxtmM9fv8Q8ytZif92v8P/drfhMsazaxfCo96bDPh1yVVoXuSvyi1Kb+cYrevShcZKPH4WHFkqPV56pISEREREWnQVJSSsLDqyCyp9nF2Yuyh6xwpcBUwe8Ns3vn5Hb7a+lXgWJ7ZZObCthcy4fQJXNL5EmIdsSFbY30QZTXhtJgo8xocLPOSElkzW82e4urPk6ptLeNacn3P67m+5/W4vW4W717MvKx5fJ71Oav3rWZX3mpgNQBnthzLyK7TiI9MP+HjZSY5aRJmXVIVUiOtpEVayS7xsPZQGeekVL64tizH3yWVEmGhg7qkREREREQaNBWlpN5z+wx+PuQC/C/UQ2X2htlMnDORovKiwMfOTj+bCd0mMK7rOFKiU0K2tvrGZDKR5LSwu9hDbpmnBotS/k6p5vWwKPVbNouNfq360a9VPx4c+CDZhdl8ueVLNuRu4NLOl3JW+lmhXmKt657kJHtnEatzyzi7aUSlOp6K3T5W5B654p66pEREREREGjwVpaTe23DYhctrEGc306Ya82lqwssrX+bPn/wZn+GjY2JHrup2FeO7jad9QvuQrCccJDmtR4pSNTNXymcYZB8Zct4sBEPOg5EWk8Yfuv8h1MuoU52b+Aee55X72F7opk0lLk6wdH8pbp+/06p9Pb2YgYiIiIiI1JzwemUnYccwDMp9Bg5L9ef/rD4y4Lx7YvUvL19dhmHwyKJHuPvruwG4LvM6Xhj+AlazvnVOJSnCf/TsQA0VpQ6Uev1ZMpsCg9Sl/rJbTHSNd7Ayt4wVB8pIOMXfmctrsPKAv0vqvFR1SYmIiIiINAZ6ZS21at6uItYedHFp21jaV2M+TE6Jh70lHswmOCOxbo/u+Qwfk7+czBNLngDgr33/yoMDH9SL5UpKrrgCX6mnRh6vYp5UWpS13l19UY4vM8nJytwysgrKyVpXXqn7NIu00ja2fh/PFBERERGRmhHSy1d99913jBgxgmbNmmEymZgzZ84p7+Nyubjnnnto1aoVDoeD1q1b8+qrr9b+YqXKNuW5+OmgCx/w2c5CSjy+Kj/G6iMDzjvG2at0Ba9gub1urp17baAg9fjgx3nowodUkKqCJKe/5p1X7sPtM4J+vIp5UulhdnSvMUuOsNItwYHVRKXeIq0mLkiP0veZiIiIiEgjEdJXd8XFxWRkZDBp0iQuvfTSSt3n8ssvJycnh1deeYX27duTnZ2Nz1f1YofUrhK3jy92+QeCW0xQ4jH4clcRo9tU/qp0Lq+PdSEYcF7iLmHcB+P4ZPMnWEwWXh31KhMzJtbZ128oomxmIqwmSj3+K/ClBjnsvKJTqr4POZejXdwqhotbxYR6GSIiIiIiUg+FtCg1bNgwhg
0bVunbz5s3j//+979s3bqVhIQEAFq3bl1Lq5PqMgyDL3YXUeIxSHZaGNIimrd/yWdjXjkbDrvoHO+o1OOsP+yi3GeQ4LDQMrpuChGHSw8z4t0RLNq1CKfVyb/H/pvhHYfXydduiJKcFnYVeThQ6gmqKFXs9pFX7i8+p6lTSkREREREpEEI6fG9qvrPf/5Dr169ePTRR0lPT6djx47ceeedlJaWhnpp8hsb8srZlFeOGX+XRPNoG71TIwD4clcRxe5Td7YZhvHrgPOkuhlwvrdwL/1f78+iXYto4mzC/KvnqyAVpOQjR/iCvQJfRZdUstOCM4ih+SIiIiIiIlJ/hFXLwdatW1m4cCFOp5PZs2eTm5vLjTfeyMGDB3nttdeOex+Xy4XL5Qq8X1BQAIDb7cbtdtfJumtLxfrr0/Modvv48sixvbOTHSTaDNxuN2cl2Pglz8WBMh+f7yhgZMuTX10ru8RDTqkXiwk6xZhr/TlmHcrioncvYnv+dtKi0/j4io85o+kZ9erPtqbVRX7ijzS47S8J7vttZ6H/ezg1ovazIPVffdz7JDwoOxIM5UeCofxIuFJ2pboqmxmTYRjBTyCuASaTidmzZzN69OgT3mbw4MF8//337Nu3j7i4OAA++ugjxowZQ3FxMREREcfc57777mPGjBnHfPydd94hMjKyxtYvYAC7kzpRFJmIo7yINvvWYOLXeJXZItmWmgEmM81yNxNXcuCEj7U3oT350SnEFe+n2cFfanXdm4s38/dtfyffk0+qPZUZ7WaQ4kip1a/ZWJQ4YtmR0g2bp4z2e1dU+3G2Nz2dUmccaQd/oUnx/hpcoYiIiIiIiNS0kpISJkyYQH5+PrGxJ54tHVadUmlpaaSnpwcKUgCdO3fGMAx2795Nhw4djrnP3Xffze233x54v6CggBYtWjB48OCT/sGEA7fbzfz58xk0aBA2W+iHP687XM7GPaWYTTCuSyrJPdKPuc2S/WUs2u/iYNOOjOjQk+jjXFGvzGvw4sYCMGDY6W1Ijzr277UmeH1eHl38KDPXzMTj85CRksEn4z4hJbpxFKTqIj+lHh/PbyzEbXVy4ZBh2C1VP4bp9Rk8s8Gfh6HnZJLgsNTCSiWc1Le9T8KHsiPBUH4kGMqPhCtlV6qr4pTaqYRVUapv3778+9//pqioiOjoaAA2b96M2WymefPmx72Pw+HA4Th2sLbNZmsw31T14bkUlHv5dp9/BtR5qZE0izn+1fL6NrOypcjLvhIPX2WXMaZt7DHH+H7KK8Vj+OcHtYqrnXlS2w5v4+rZV7No1yIAxnYZy0sjXiLOGXeKezY8tZkfmw2irEUUewzyvSaaOav+dQ4Uu/EaEGEx0TTKUSfzxSQ81Ie9T8KTsiPBUH4kGMqPhCtlV6qqsnkJ6cTgoqIiVq9ezerVqwHYtm0bq1evZufOnYC/y2nixImB20+YMIHExESuvfZa1q9fz3fffcfkyZOZNGnScY/uSd0wDIPPdxbh8hqkRVo5O+XEfxdmk4nhLaOxmGBLgZs1h1xHfb62B5wbhsGbP71JxgsZLNq1iBh7DG+MfoP3x7zfKAtSdSEpyGHne4o9ADSLsqogJSIiIiIi0oCEtCi1fPlyMjMzyczMBOD2228nMzOT6dOnA5CdnR0oUAFER0czf/588vLy6NWrF1deeSUjRozg6aefDsn6xW/NQRfbCt1YTHBxq2jMpygcJEVY6Zfmn+f19e5i8st/LVbsLvaQW+bFZoauCcd2uAXjUOkhrvjwCv4w5w8UlhfSt0VffvrLT0zMmKhiRy1KivAftztQ6qnW/SuuvNc8Sr+ZERERERERaUhCenzv/PPP52Rz1l9//fVjPtapUyfmz59fi6uSqsgv9/L1nmIA+qVFBrpiTuXMphFszi9nT7GHz3cWMa6d/xhfRZdU53gHTkvN1Uy/2fYNE2dPZE/hHqxmK/f1v4+7zr0LqzmsTrCGpeQa7JQSERERERGRhiOknVIS3g
zD4LMdRZT7DJpHWTmzaeWPUJpNJi5uGYPVBNsL3aw+WEaJx8fGPP9xvsyk48+kqiqXx8XkLydz4ZsXsqdwDx0SOvDDpB+4p989KkjVkSSnv1OqOkWpgnIvhW4fJiAtUp1SIiIiIiIiDYlelUu1rcwtY0eRG5sZLm4Vc8pje7+X4LTQv1kUX+8p5ps9xRwo9eI1ICXCUiMFiHX713HlR1fyU85PAPypx594YsgTRNmjgn5sqbyKolSh20eZ11elDriKLqmmEZZqXblPRERERERE6i91Skm1HHZ5WbDXf2zv/GZRxDss1XqcXslOWkRbcfv8RS6AzKTghtYbhsEzS5+h10u9+CnnJ5Iik5h7xVxeHPGiClIh4LSaibb5t5qDVeyWqpgnla55UiIiIiIiIg2OilJSLZ/vLMLtg5bRNnoEcdTOdOQY35GaBXaziS7x1R9wnl2YzbC3h3HzvJsp85QxtP1Q1t6wlpGnjaz2Y0rwkiuO8JVWrSi1+0inlIaci4iIiIiINDwqSkmVFZZ72VnkxgRc1DI66CvXNXFYuLB5NAA9kp3VPqY1Z+Mcuv2zG19s+QKn1cmzw57lswmfkRqdGtT6JHgVR/gOlFX+Cnx7it3sK/FgAlpE66SxiIiIiIhIQ6NXelJlO4r8R6pSI600qeaxvd/LSHTSPtZOpLXqBami8iJum3cbL696GYDuqd15+9K36ZLcpUbWJsFLiqj6FfgWZpcAcHqCgxh7zeRMRERERERE6g8VpaTKdhb6i1Ito2v2SFWUreqNe0t3L+Wq2VeRdSgLEyYm95nMzAtmYrfYa3RtEpykKh7f213kZluhGzPQNzWyFlcmIiIiIiIioaKilFRZRadUTRelqsLj8/Dg9w9y/3/vx2t4aRHbgjcveZPzW58fsjXJiVUUpYo8Pso8PpzWkxcgvz/SJdUt0VFj3XgiIiIiIiJSv6goJVWSX+4lv9yHCWgeojk/Ww9v5erZV/PDrh8AuOL0K3j+oueJj4gPyXrk1BwWM7E2MwVuHwfKvLSIPnFRameRmx1Fbswm6KMuKRERERERkQZLRSmpkoqje2mRVhyWup+T/9XWr7j0/UspLC8k1hHL8xc9z5VnXFnn65CqS4qwUOD2kVvmocVJuuwqZkllJDqJ0ywpERERERGRBktFKamSiqN7rWJCc3SvW9NuOK1Ouqd2Z9Yls2jVpFVI1iFVl+S0srXAzYGTzJXaUVjOziI3FhP0Tomow9WJiIiIiIhIXVNRSirNMIxaG3JeWSnRKSyctJB28e2wmNVFE04Cw85PcAU+wzACs6QyEp3EqktKRERERESkQVNRSiotv9xHgduH2QTpUaEbct4xsWPIvrZUX3KgKOU57ud3FLrZXexRl5SIiIiIiEgjUfdDgSRsVRzdaxZpxW4xhXg1Em4Snf4aeInHoMTtO+pzhmHw/T5/l1T3JCcx6pISERERERFp8FSUkkoL9dE9CW92i4k4u3/LOfC7bqlthW72FHuwmqB3iq64JyIiIiIi0hioKCWVYhgGO490SrUM0ZBzCX/Hmyv121lSmUlOom3alkRERERERBoDvfqTSjns8lHo9mEJ8TwpCW/JR47w/bYotaXATXaJv0vqHHVJiYiIiIiINBoqSkmlVHRJNYuyYjNrnpRUT1LE0cPODcNg4ZEuqZ7JEUSpS0pERERERKTR0CvARuBAqYfPdhZy8ARXPauMHYXlgOZJSXCSKjqlSr0YhkFWQTn7Sj3YzHB2U11xT0REREREpDGxhnoBUrv2lXh4LyufMq9BsdvH2HZxVX6M386TahVtr+klSiOS6LRgAkq9BsWeX2dJ9UyOIFJdUiIiIiIiIo2KXgU2YPtKPLx7pCAF/tk9+eXeU9zrWIdcXoo9BlaT//ieSHXZzCaaOPzbzg/7Sthf6sVuNqlLSkREREREpBFSUaqByi52825WPi6vQXqUleZHikk/5ZZV+bF2FFbMk7Jh1TwpCVLFEb6VR7LYK9
lJhFVbkYiIiIiISGOjV4IN0J5iN+9lFeDyGjSPsnJ5u1h6Jfs7UX46WIbXMKr0eIGjezGaJyXBS3JaAv/vMJs4U11SIiIiIiIijZKKUg3M7iI372cV4PIZtIi2cnm7OBwWMx3i7ERZTRR7DLLyyyv9eL+dJ6Uh51ITkp2/HgHt1VRdUiIiIiIiIo2VXg02ILuK3PxrSwHlPoOW0TbGto3DbvEft7OYTZyR6ARgVRWO8OWWeSnxGNjM0CxS86QkeKlHcuSwmDgzWV1SIiIiIiIijZWqDA3EziI3/96Sj9sHraJtjGkXi+13858yEp0szille6Gbwy4v8Q7LCR7t6McFSI+yYdE8KakBCU4Ll7eLJdpmxqkuKRERERERkUZLrwgbgF1FnkBBqnXM8QtSAE0cFtrG+o/gra5kt1TFkHMd3ZOa1DbWTtMI1cRFREREREQaMxWlwlyxI46PdhTj9kGbGBuXtT1+QapCZpL/CN+aQ2V4fCcfeP7beVIaci4iIiIiIiIiNUlFqTC2vcjNruTOeAxoF3vqghRAu1g7MTYzpR6DzXknH3i+v9RLmdc/TypV86REREREREREpAapKBWmXF4fn+4qxTBbaBtj5ZI2sVgrMfPJbDKRUTHw/GDpSW9b0SXVIsqGxaR5UiIiIiIiIiJSc1SUClMOi5nhLSKJLT7AiBaRlSpIVchIdGDCP4sqt8xzwttVFKVa6uieiIiIiIiIiNQwFaXCWKtoK+kHN1epIAUQY7fQPs4OnHjgue8386Q05FxEREREREREapqKUo1UxcDztYdcuI8z8Hx/qReX18BuNmmelIiIiIiIiIjUOBWlGqk2MTbi7GZcXoONh13HfH5HoX8IeotoK2bNkxIRERERERGRGqaiVCNlMpnoXjHw/DhH+HR0T0RERERERERqk4pSjdgZiU7MwN4SDzklvw489xkGu4v877eKsYdodSIiIiIiIiLSkKko1YhF2cx0bHJk4PnBX7ulcko8uHwGDouJphGWUC1PRERERERERBowFaUauYqB5+sOuXB5fQDsOHJ0r0W0TfOkRERERERERKRWqCjVyLWMtpHgsFDuM9hw2D/cfGehvyjVSvOkRERERERERKSWqCjVyJlMJronVQw8L8VrGOwq1pBzEREREREREaldIS1Kfffdd4wYMYJmzZphMpmYM2fOSW+/YMECTCbTMW/79u2rmwU3UN0SHFhMkFPqZVVuGW4fODVPSkRERERERERqUUiLUsXFxWRkZPDcc89V6X6bNm0iOzs78Na0adNaWmHjEGE106mJA4D/7i0G/F1SJs2TEhEREREREZFaYg3lFx82bBjDhg2r8v2aNm1KkyZNan5BjVhmkpN1h124/bPOaRmjo3siIiIiIiIiUnvCcqZU9+7dSUtLY9CgQSxatCjUy2kQ0qOsJDt/Pa6nIeciIiIiIiIiUptC2ilVVWlpabzwwgv06tULl8vFyy+/zPnnn8/SpUvp0aPHce/jcrlwuVyB9wsKCgBwu9243e46WXdtqVh/TT2PbvE2vsn2EmExEWfxhf2fj5xcTedHpK4ou1Jdyo4EQ/mRYCg/Eq6UXamuymbGZBiGUctrqRSTycTs2bMZPXp0le7Xv39/WrZsyaxZs477+fvuu48ZM2Yc8/F33nmHyMjI6iy1wfKZzOQ0aU2UK5/YkoOhXo6IiIiIiIiIhKGSkhImTJhAfn4+sbGxJ7xdWHVKHc9ZZ53FwoULT/j5u+++m9tvvz3wfkFBAS1atGDw4MEn/YMJB263m/nz5zNo0CBsNh23k6pRfiRcKbtSXcqOBEP5kWAoPxKulF2propTaqcS9kWp1atXk5aWdsLPOxwOHA7HMR+32WwN5puqIT0XqXvKj4QrZVeqS9mRYCg/EgzlR8KVsitVVdm8hLQoVVRURFZWVuD9bdu2sXr1ahISEmjZsiV33303e/bs4c033wTgySefpE2bNnTt2pWysjJefvllvvnmG7788s
tQPQUREREREREREamGkBalli9fzoABAwLvVxyz+8Mf/sDrr79OdnY2O3fuDHy+vLycO+64gz179hAZGckZZ5zBV199ddRjiIiIiIiIiIhI/RfSotT555/Pyeasv/7660e9P2XKFKZMmVLLqxIRERERERERkdpmDvUCRERERERERESk8VFRSkRERERERERE6pyKUiIiIiIiIiIiUudUlBIRERERERERkTqnopSIiIiIiIiIiNQ5FaVERERERERERKTOqSglIiIiIiIiIiJ1TkUpERERERERERGpcypKiYiIiIiIiIhInVNRSkRERERERERE6pyKUiIiIiIiIiIiUudUlBIRERERERERkTqnopSIiIiIiIiIiNQ5a6gXUNcMwwCgoKAgxCsJntvtpqSkhIKCAmw2W6iXI2FG+ZFwpexKdSk7EgzlR4Kh/Ei4UnaluipqLhU1mBNpdEWpwsJCAFq0aBHilYiIiIiIiIiINFyFhYXExcWd8PMm41RlqwbG5/Oxd+9eYmJiMJlMoV5OUAoKCmjRogW7du0iNjY21MuRMKP8SLhSdqW6lB0JhvIjwVB+JFwpu1JdhmFQWFhIs2bNMJtPPDmq0XVKmc1mmjdvHupl1KjY2FhtEFJtyo+EK2VXqkvZkWAoPxIM5UfClbIr1XGyDqkKGnQuIiIiIiIiIiJ1TkUpERERERERERGpcypKhTGHw8G9996Lw+EI9VIkDCk/Eq6UXakuZUeCofxIMJQfCVfKrtS2RjfoXEREREREREREQk+dUiIiIiIiIiIiUudUlBIRERERERERkTqnopSIiIiIiIiIiNQ5FaVERERERERERKTOqSglIiIiUkN0/RgRCRXtPyISjlSUkhPSDzaprv3794d6CSIiderQoUMAmEymEK9ERBob7T8iEs5UlJJjFBUV4Xa7MZlMKkxJla1atYrU1FS+++67UC9FpNJcLhc+ny/Uy5AwtWrVKpKSkli+fHmolyJhSPuPBEP7j4Qj7XvyWypKyVE2bNjAJZdcwvvvv095ebkKU1IlP/30E/379+e2226jX79+oV6OSKWsX7+eiRMnsmTJEu13UmWrV6+mf//+3H777fTq1SvUy5Ewo/1HgqH9R8KR9j35PWuoFyD1x44dO7jsssvYsmULRUVFOJ1ORo4cid1uxzAMtQTLSf3888/06dOH22+/nZkzZ2IYBllZWezdu5eOHTuSlpYW6iWKHGPbtm2MGDGCbdu2sX37dp5//nl69Oih/U4qpWLfmzx5MjNmzMAwDHJycsjJyaFLly7YbLZQL1HqMe0/EgztPxKOtO/J8ahTSgDwer18+OGHtG/fnmXLltGkSRMefPBB/vOf/6hjSk7J5XLxt7/9jdLSUmbOnAnA8OHDGTduHAMGDGDEiBHceuutoV2kyO+Ul5cza9Ysevbsyc8//0xhYSGTJk1i5cqVgf1O+56cSFFREbfccgs2m40ZM2YAcNlll3HRRReRmZnJoEGDePLJJ0O7SKm3tP9IMLT/SDjSvicnoqKUAGCxWLjggguYOHEiGRkZfPrpp6SkpAQKUy6XS4UpOSG73c7UqVPp3LkzZ599NoMGDcJisfCPf/yDtWvXMmLECBYsWMD9998f6qWKBJjNZs466yzGjBlDly5dWLNmDW63O/APJJ/Pp9/cyQlZrVb++Mc/kpaWxogRIxgyZAgej4e//e1v/PDDD7Rq1Yp33nmHN954I9RLlXpI+48EQ/uPhCPte3IiJkNVBjnC7XYf1epbXl7OqFGjyMnJYerUqYwaNQqbzcbcuXMZNWpUCFcq9clvj3auWrWKa665BqvVyty5c2nevDkApaWl/PnPf2bv3r189tln2O32UC5ZJKCsrAyn0xl43+VykZmZic1m49VXX6Vnz54YhsF3331H//79Q7hSqU8q9j2Xy8Vnn33G5MmTadq0KR9++GHgqHJ+fj4jRoygWbNmvPfeeyFesdRH2n+kOrT/SDjTvifHo6JUI5
abm8uuXbuIjIykadOmxMfH4/P5MJvNeDwerFYrLpeL0aNHk5OTw1133cW3337Lf/7zH5YvX06zZs1C/RQkhCqKmBVbSEUn3dq1a8nOzmbgwIFYrVa8Xi8Wi4W///3vfPTRRyxatOioH0YidSkvL4+DBw8SGxtLVFQUkZGRgd/Meb1erFYrZWVl9OjRA5vNxosvvsgbb7zB4sWLmT9/PsnJyaF+ChJCFT8b4dcXhmVlZXzzzTeYzeZAl2jFvnfrrbeycuVKFixYgNms5vTGTvuPBEP7j4Qj7XtSGSpKNVJr1qxh7NixeL1eXC4XKSkpPPvss5xzzjmB21T88CsvL+eSSy5h/vz52O12vvvuO3r06BHC1Uuobdq0ifvvv5877rgjkIWKfyBVXN719/8AmjRpEoZh8NJLLwX+USVSl9asWcPVV19NSUkJPp+PHj16MHPmTDp16hQoyFcUW10uF2eeeSYbNmzAZrOxcOFC7XuN3C+//MIrr7zCddddR4cOHYBf973y8nLMZvMxe9v48eNJSkri6aef1pGERk77jwRD+4+EI+17UlkqmzdC+/btY8SIEYwePZrPPvuMZ555hg4dOtCvX7+jWnwrulzsdjutWrUiJiaGpUuXaoNo5LZu3cqgQYP47LPPeOCBB1i1ahXwa6eU2Ww+qiB16NAhpk6dyieffMLkyZNVkJKQ2L17N0OGDGHgwIG89dZb3HLLLRQWFtK7d2+WLFmC2WzG6/Vis9nweDw4HA769u1LXFwcy5cv177XyG3ZsoVzzz2Xf/7znzz//PNs2bIFIPBCz263H7W3lZSUcM8997BgwQJuuukmvSBs5LT/SDC0/0g40r4nVWJIo7Nq1Srj9NNPN7Zt2xb4WElJiXHnnXcadrvd+OSTTwzDMAyv12sYhmE899xzhslkMlauXBmK5Uo9UlJSYlxzzTXGmDFjjOeee84YOHCgMWLEiBNmY968ecYf/vAHo3nz5sqPhNTXX39t9OzZ0zh48GDgY1lZWcb48eONyMjIQD4r9r3HH39c+54YhmEYRUVFxoQJE4zx48cbM2bMMDIzM42bbrrJyMrKOu7tZ8+ebYwfP95IS0tTfsQwDO0/Un3afyRcad+TqlDLQiOUn5/PunXrArOAfD4fERERPProo5SWljJhwgSWL18eaA8eN24cQ4cOpW3btqFcttQDERERDB06lIKCAq6//npSU1N5/vnnuffee5kxYwaZmZlH3T4jI4Pdu3dz77330qZNmxCtWsQ/02D16tW43e7Ax9q1a8djjz2G2+1m7NixfPvtt7Ro0QLDMBgwYACbNm0K7IPSeDkcDvr3709kZCRXXXUVCQkJvPrqqwDceuuttGvX7qjb9+zZk/Xr13P//ffTvn37UCxZ6hntP1Jd2n8kXGnfk6rQTKlGyOv1csEFF5CWlsbzzz9PQkJC4Fzvnj17mDBhAgMHDmTatGmB41giJ/LBBx/wwgsvEBkZyf3330/37t1xuVwcPHiQZs2aBbIlEkr79u1j1KhRDBw4kLvvvpuYmJjA55YsWcL/+3//j1tvvZUrr7wyhKuU+qqsrAyHwxE4BvP000/z+uuv07dvX2677Tbatm1LeXk5eXl5NG3aNDBoWAS0/0hwtP9IONK+J1WhV4qNkMViYdy4cWzfvp2nn36agoKCQNEgPT2d6OhoNm7ciMlkUjFBTsjr9QIwZswY/vznP1NSUsL06dP58ccfue222+jVqxcul0uzDKReSE1NpX///nzxxRd89NFHlJWVBT53zjnn4PV6WbRoUQhXKPWZ0+kMXCkI4Oabb+aaa65h0aJF/O///i8bN25kypQpjBw5MjB0WKSC9h8JhvYfCUfa96QqdHyvkTGOXKnjhhtuYMuWLcydO5fS0lLuueceYmNjAUhMTCQ+Ph6v14vZbFZRQY5SkSGLxRK4YsbYsWMxmUz83//9H8OGDcPr9fLFF1/gcDhCvVyRQLfeww8/zOWXX84//vEPSk
tLueaaa3A6nQC0adOGZs2ahXilUl8db9+7+eabAZg1axafffYZ+/fv59tvv8Vut4d4tVKfaP+RYGn/kXCjfU+qSsf3GpmKlt6KzWLmzJl8+umn5OXlMXLkSHbt2sUnn3zCkiVL6Nq1a6iXK/VMRX7y8/OJi4sDfv3HEsDAgQNZuXIl33//PaeffnoolyoS8PujDJMmTeKnn34iMTGRwYMHs3HjRv71r3+xbNkyOnXqFMKVSn1UkZ+ioiKio6MBjjqWfM4557B582b++9//0q1bt1AuVeoh7T8SDO0/Eo6070lVqb+zAfP5fEe9X7FB7Nixg27durFgwQKmTZvGI488wuDBg1m7di0Oh4PFixerICUnzU+/fv345JNPAP8liT0eD1OmTOH7779nwYIFKkhJvWAYBh6PJ5Db/v37s3btWl555RVuueUWkpOT+eCDDzh48CALFy7UP4zkKL/Pz+jRo1m4cCEAZrMZt9vN9ddfz7Jly/SCUI6h/UeCof1HwoHL5TrmY9r3pDrUKdUA/baL5fdDpnfs2EHfvn0ZPnw4zz77LFbrryc4DcPQYHOpdH7++c9/HnW08/3336dTp05kZGTU+ZpF9u7dy48//khZWRkdOnSgR48egc9t3bqV888/n2HDhvHcc88dte+5XC7MZjM2my0Uy5Z6orL5eeGFF47a91588UV69OjBmWeeGYplSz2xbds25syZw4EDB+jduzcjRowIfE77j5xKZfOj/Ufqk/Xr13P99dfzyCOPcO655x71Oe17UlUqSjUw69evp0+fPkyZMoWpU6cCRxcWJk2ahM1mO+oH22+PX0njpvxIOFq7di2XXHIJ8fHx7N+/H4Dnn3+eiy++GMMwGDp0KElJSbz11lvKqhyjOvnRvicV1qxZw8UXX0zHjh0pKytj8eLFzJkzh5EjRwIwePBgkpKSePvtt5UZOUZ18qP9R+qDSZMm8frrr9O2bVtmzZpF79698fl8mEwmhgwZQmJiIu+8846yKpWiolQDsnv3bkaOHElxcTG5ublMnjyZv/71r8CvR68qBiSK/J7yI+Foy5Yt9O/fn6uuuoq//vWv7Nq1i+eff54DBw7wxhtvEBUVRXl5OTabTf8wkmMoPxKMzZs3M3DgQK6++mruu+8+iouLueqqq7j44ou58cYbAX9XgC76Icej/Eg4e+2119i0aRO5ubnMmTOH2bNnc9555wHHzpQSORVdfa+B8Pl8fPjhh7Rp04abbrqJZcuW8eCDDwLw17/+VQUFOSnlR8JReXk5zz33HH369GHmzJnYbDaaNGnCmWeeybRp0wJz0XQ1Ijke5UeCUV5ezowZMxg4cCAzZ87EYrFgt9uJiIhgyZIlrFixgjPOOIOJEyeqqCDHUH4k3EVGRvL999/z1VdfkZOTw5gxY1iwYAGzZs0iIyODcePGhXqJEkZUlGogzGYzF110EU2bNmXAgAF0794dwzB46KGHAH9hwWazHTMjSASUHwlPZrOZ9u3b06ZNG2w2W+BIwwUXXMD9999Pfn4+MTExR91Hxx6kgvIjwbDb7UydOpW9e/cGOgIefPBBZs+ezfjx43E6ndx2221kZWXxzDPPhHi1Ut8oPxLuevToESikfvzxx1xxxRVkZmYSHR3N4sWLQ708CTMqSjUgHTp0oH379gDEx8fzxz/+EZPJdFTHi2EYfPzxx/Tu3ZukpKRQLlfqGeVHwo3VauWSSy4hLS3tqI9XdLZ4vd5AEWHjxo106tRJBQUJUH4kWF27dg1crXjt2rV8++23fPLJJwwdOjRQ4Lziiiu46aabOO2000K8WqlvlB8JZx06dODw4cNs3ryZjh07YrFYsFqt+Hw+8vPzQ708CTMqSoWxvXv3smfPHg4ePMiFF16I2WzGbDbj8XiwWq0kJSUxadIkwP/bF8MwOHjwIE899RQ7d+4M8eol1JQfCUcVuc3NzWXIkCGkpKQABHLr8/koKCigpKQEu92OyWTi7rvv5pFHHu
Hw4cPExsaqsNCIKT8SjBP93ATo1q0bb7755lFFTrPZTJcuXfRLHAGUHwlPv83toEGDMJlMmM1mSktLiY+Pp7CwkJtvvpkFCxbwzTff8Pjjj3POOefwww8/cNZZZ4V6+RImVJQKU2vWrGH48OHExMSwefNmunXrxp/+9CeuuuoqoqOjAwPmkpOTue666zAMg6lTp9KkSRMWL158zG+GpXFRfiQcHS+3119/PVdffTXR0dGB46V2ux2r1UpERAQzZszgueeeY8mSJcTFxYX6KUgIKT8SjFP93ARITU096j6LFy+mefPmmksmyo+EpRPldsKECcTGxtKzZ0/OO+884uPj+fjjj+nRowezZs3CbrfTpEmTUC9fwoiGw4Sh3NxcrrjiCiZMmMCnn37K3r176dSpE6+//jrTpk2jsLAQi8USGNKalJTE+vXriYmJYeHChfTq1SvEz0BCSfmRcHSi3L7xxhuB3Fb8xjk6Oprk5GT+9Kc/8eCDD/Ltt9/qt3WNnPIjwajMz00g0EWXnZ3NtGnTeOmll3jkkUeOmU0mjYvyI+HoZLmdPn06paWl9OvXjwsuuIBPP/2UHj16AP4j8LNmzaJjx44hfgYSVgwJO2vXrjVat25t/PTTT4GPuVwuY/r06cZZZ51l3HPPPUZpaalhGIbh8/mMWbNmGSkpKcaKFStCtWSpR5QfCUdVye3PP/9smEwmIyIiwli9enWoliz1iPIjwahKfpYvX25cddVVRps2bYxVq1aFaMVSnyg/Eo5OlttevXoZM2bMMAzDMAoLC0O1RGlA1CkVhirmXFTM9fF4PNjtdqZNm0b//v359NNP+fHHHwH/b1369u3L0qVLAxVsadyUHwlHVclteno6d9xxBytWrCAjIyOUy5Z6QvmRYFQlP6mpqVx++eV8/fXXdO/ePYSrlvpC+ZFwdLLcDhgwgA8//JCFCxcSHR2NYRghXq2EO5OhFIUdl8vFueeeS2pqKnPmzMFisQSGtBqGQUZGBpmZmbzxxhu6fLUcQ/mRcFSV3Fbc3uFwhHjVUl8oPxKMyuSne/fuvPnmm6FeqtRDyo+Eo6r+3BQJhjqlwozP58PhcPDaa6/x3XffccMNNwAENgiTycTIkSPZv38/gAoKchTlR8JRVXJb8XsWFRSkgvIjwahsfg4cOBDilUp9pPxIOKrq6wWRYKkoFWbMZjNer5fTTz+dN954g3fffZeJEyeSk5MTuM22bduIj4/H6/WGcKVSHyk/Eo6qktuKAf0iFZQfCYZ+bkowlB8JR8qt1DUd36vnfn98qqJtsqioCJfLxerVq5kwYQKtWrUiISGBxMRE5s6dy+LFi+nWrVsIVy71gfIj4Ui5lWAoPxIM5UeCofxIOFJuJdTUKVVPVVSdK2qGhmEENojt27fTsWNHfvzxRwYOHMi6deu46KKLSE9Pp2nTpixbtkwbRCOn/Eg4Um4lGMqPBEP5kWAoPxKOlFupL9QpVQ9t3ryZf/7zn+zcuZOMjAyuvvpq2rRpA8CuXbvo0aMHo0aN4qWXXsLn82GxWAIVbp/Ph9msWmNjpvxIOFJuJRjKjwRD+ZFgKD8SjpRbqU+Upnpm7dq19OnTh8OHD+Pz+fj888959913MQwDt9vN3Llzueqqq3jppZcwmUxYLJaj7q/B1I2b8iPhSLmVYCg/EgzlR4Kh/Eg4Um6lvlGnVD2ydetWLrjgAq666ioeeOABAP74xz8SFRXFU089Fbid1+s9ZnMQUX4kHCm3EgzlR4Kh/EgwlB8JR8qt1EfqlKonvF4v8+fPZ+DAgdxxxx2Bs70RERH8/PPP9O/fn4kTJ/LDDz8E2idFKig/Eo6UWwmG8iPBUH4kGMqPhCPlVuordUrVI9u2baOkpISuXbsCcP/99/PQQw8xffp0ysrK2LRpEz/++CNfffVV4MyvSAXlR8KRcivBUH4kGMqPBEP5kXCk3Ep9ZA31AuRXbdq0CVSkXS4XS5cu5YMPPuDiiy8GYOHChV
x22WVkZWVpk5BjKD8SjpRbCYbyI8FQfiQYyo+EI+VW6iMVpUJo7969rFy5kvLyclq1akXPnj0xmUx4vV4cDgcff/wxZrM5cIWDhIQEUlJSSEhICPXSpR5QfiQcKbcSDOVHgqH8SDCUHwlHyq2EAxWlQmTt2rWMHj2apKQktm7dSuvWrbnrrrsYM2ZMYKhcxZUNKi65OWvWLJxOJ61atQrZuqV+UH4kHCm3EgzlR4Kh/EgwlB8JR8qthAsNOg+BLVu2cNFFFzFmzBi+/PJL5s2bR9euXfn888/xer2BlsqKTWLnzp1MmTKFF198kZdffpmkpKRQLl9CTPmRcKTcSjCUHwmG8iPBUH4kHCm3ElYMqVMul8u4/fbbjcsvv9xwuVyBj7/yyitGYmKikZube9Ttf/zxR+PGG280MjIyjNWrV9f1cqWeUX4kHCm3EgzlR4Kh/EgwlB8JR8qthBsd36tjPp+P5s2b07lzZ+x2O4ZhYDKZ6NOnD9HR0bjd7qNu36tXL0pLS/nb3/5GWlpaiFYt9YXyI+FIuZVgKD8SDOVHgqH8SDhSbiXcqChVx5xOJ6NHjz7magZNmjTBZrMdtUmsWLGCnj17ct5559X1MqWeUn4kHCm3EgzlR4Kh/EgwlB8JR8qthBvNlKoD2dnZLFu2jHnz5uHz+QIbhNfrDZzjzc/P5/Dhw4H7TJ8+nUGDBnHw4MHAmV9pnJQfCUfKrQRD+ZFgKD8SDOVHwpFyK+FMnVK1bM2aNYwcORKHw0FOTg5paWlMnz6dIUOGkJCQEGinNJlMmM1moqOjeeCBB3jsscf4/vvvSUxMDPVTkBBSfiQcKbcSDOVHgqH8SDCUHwlHyq2EvTqdYNXI7N+/3+jUqZMxdepUY8uWLcaePXuMcePGGZ07dzbuvfdeY//+/YHb5uTkGJmZmca4ceMMu91uLF++PIQrl/pA+ZFwpNxKMJQfCYbyI8FQfiQcKbfSEKgoVYvWrVtntG7d+phv+Lvuusvo1q2b8eijjxrFxcWGYRjG+vXrDZPJZERERBirVq0KwWqlvlF+JBwptxIM5UeCofxIMJQfCUfKrTQEmilVi9xuNx6Ph5KSEgBKS0sBePjhhxkwYAD//Oc/ycrKAiA+Pp4bb7yRlStX0r1791AtWeoR5UfCkXIrwVB+JBjKjwRD+ZFwpNxKQ2AyDE01q01nnXUW0dHRfPPNNwC4XC4cDgcAZ555Ju3bt+fdd98FoKysDKfTGbK1Sv2j/Eg4Um4lGMqPBEP5kWAoPxKOlFsJd+qUqkHFxcUUFhZSUFAQ+NiLL77IunXrmDBhAgAOhwOPxwNAv379KC4uDtxWG0TjpvxIOFJuJRjKjwRD+ZFgKD8SjpRbaYhUlKoh69ev59JLL6V///507tyZt99+G4DOnTvz1FNPMX/+fMaOHYvb7cZs9v+x79+/n6ioKDwejy7D2cgpPxKOlFsJhvIjwVB+JBjKj4Qj5VYaKmuoF9AQrF+/nn79+jFx4kR69erFihUruPbaa+nSpQuZmZmMHDmSqKgobrzxRs444ww6deqE3W7n008/ZcmSJVit+mtozJQfCUfKrQRD+ZFgKD8SDOVHwpFyKw2ZZkoF6dChQ4wfP55OnTrx1FNPBT4+YMAAunXrxtNPPx34WGFhIQ888ACHDh3C6XRyww030KVLl1AsW+oJ5UfCkXIrwVB+JBjKjwRD+ZFwpNxKQ6eSaZDcbjd5eXmMGTMGAJ/Ph9lspk2bNhw6dAgAwzAwDIOYmBgeeeSRo24njZvyI+FIuZVgKD8SDOVHgqH8SDhSbqWhU0qDlJKSwltvvcV5550HgNfrBSA9PT2wCZhMJsxm81ED6UwmU90vVuod5UfCkXIrwVB+JBjKjwRD+ZFwpNxKQ6eiVA3o0KED4K9G22w2wF+t3r9/f+A2Dz30EC+//HLgSgjaJKSC8iPhSLmVYCg/EgzlR4Kh/Eg4Um6lIdPxvRpkNpsxDCOwAVRUrqdPn84DDz
zAqlWrNGROTkj5kXCk3EowlB8JhvIjwVB+JBwpt9IQqVOqhlXMjbdarbRo0YLHHnuMRx99lOXLl5ORkRHi1Ul9p/xIOFJuJRjKjwRD+ZFgKD8SjpRbaWhURq1hFdVqm83GSy+9RGxsLAsXLqRHjx4hXpmEA+VHwpFyK8FQfiQYyo8EQ/mRcKTcSkOjTqlaMmTIEAB++OEHevXqFeLVSLhRfiQcKbcSDOVHgqH8SDCUHwlHyq00FCajov9PalxxcTFRUVGhXoaEKeVHwpFyK8FQfiQYyo8EQ/mRcKTcSkOgopSIiIiIiIiIiNQ5Hd8TEREREREREZE6p6KUiIiIiIiIiIjUORWlRERERERERESkzqkoJSIiIiIiIiIidU5FKRERERERERERqXMqSomIiIiIiIiISJ1TUUpEREREREREROqcilIiIiIideCaa67BZDJhMpmw2WykpKQwaNAgXn31VXw+X6Uf5/XXX6dJkya1t1ARERGROqKilIiIiEgdGTp0KNnZ2Wzfvp3PP/+cAQMGcMsttzB8+HA8Hk+olyciIiJSp1SUEhEREakjDoeD1NRU0tPT6dGjB1OnTmXu3Ll8/vnnvP766wA88cQTdOvWjaioKFq0aMGNN95IUVERAAsWLODaa68lPz8/0HV13333AeByubjzzjtJT08nKiqKs88+mwULFoTmiYqIiIhUgopSIiIiIiF0wQUXkJGRwUcffQSA2Wzm6aefZt26dbzxxht88803TJkyBYA+ffrw5JNPEhsbS3Z2NtnZ2dx5550A3HTTTSxevJj33nuPNWvWMHbsWIYOHcovv/wSsucmIiIicjImwzCMUC9CREREpKG75ppryMvLY86cOcd87oorrmDNmjWsX7/+mM998MEH/OUvfyE3Nxfwz5S69dZbycvLC9xm586dtG3blp07d9KsWbPAxy+88ELOOussHnzwwRp/PiIiIiLBsoZ6ASIiIiKNnWEYmEwmAL766iseeughNm7cSEFBAR6Ph7KyMkpKSoiMjDzu/deuXYvX66Vjx45HfdzlcpGYmFjr6xcRERGpDhWlREREREJsw4YNtGnThu3btzN8+HBuuOEG/v73v5OQkMDChQu57rrrKC8vP2FRqqioCIvFwooVK7BYLEd9Ljo6ui6egoiIiEiVqSglIiIiEkLffPMNa9eu5bbbbmPFihX4fD4ef/xxzGb/6M9//etfR93ebrfj9XqP+lhmZiZer5f9+/dz3nnn1dnaRURERIKhopSIiIhIHXG5XOzbtw+v10tOTg7z5s3joYceYvjw4UycOJGff/4Zt9vNM888w4gRI1i0aBEvvPDCUY/RunVrioqK+Prrr8nIyCAyMpKOHTty5ZVXMnHiRB5//HEyMzM5cOAAX3/9NWeccQYXX3xxiJ6xiIiIyInp6nsiIiIidWTevHmkpaXRunVrhg4dyrfffsvTTz/N3LlzsVgsZGRk8MQTT/DII49w+umn8/bbb/PQQw8d9Rh9+vThL3/5C+PGjSM5OZlHH30UgNdee42JEydyxx13cNpppzF69Gh+/PFHWrZsGYqnKiIiInJKuvqeiIiIiIiIiIjUOXVKiYiIiIiIiIhInVNRSkRERERERERE6pyKUiIiIiIiIiIiUudUlBIRERERERERkTqnopSIiIiIiIiIiNQ5FaVERERERERERKTOqSglIiIiIiIiIiJ1TkUpERERERERERGpcypKiYiIiIiIiIhInVNRSkRERERERERE6pyKUiIiIiIiIiIiUudUlBIRERERERERkTr3/wHXeM01gLMhVgAAAABJRU5ErkJggg==",
"text/plain": [
"
"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Plotting moving averages\n",
"plt.figure(figsize=(12, 6))\n",
"plt.plot(ohlcv_df[\"price_close\"], label=\"Closing Price\", color=\"skyblue\")\n",
"plt.plot(ohlcv_df[\"7_day_MA\"], label=\"7-Day Moving Average\", color=\"green\")\n",
"plt.plot(ohlcv_df[\"30_day_MA\"], label=\"30-Day Moving Average\", color=\"red\")\n",
"\n",
"plt.title(\"Closing Price and Moving Averages\")\n",
"plt.xlabel(\"Date\")\n",
"plt.ylabel(\"Price\")\n",
"plt.legend()\n",
"plt.grid(True)\n",
"plt.xticks(rotation=45)\n",
"plt.tight_layout()\n",
"plt.show()"
]
}
],
"metadata": {
"colab": {
"include_colab_link": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/PyAirbyte_Document_Creation_RAG_with_Langchain_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "y4_-FI2aLFYP"
},
"source": [
"# PyAirbyte Demo\n",
"\n",
"This demo uses the PyAirbyte library to read records from GitHub, converts those records to documents, which can then be passed to LangChain for RAG.\n",
"\n",
"#### Prerequisites:\n",
"\n",
"- A GitHub personal access token. For details on configuring authentication credentials, refer to the GitHub source connector [documentation](https://docs.airbyte.com/integrations/sources/github).\n",
"\n",
"- OpenAI API Key. You can create one by signing up on https://openai.com/ and\n",
"going to the \"Keys\" tab on left sidebar."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "LZ5mUBw_g2kT"
},
"source": [
"## Install PyAirbyte and other dependencies\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "kZjfRtSGoQHJ"
},
"outputs": [],
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# Install PyAirbyte & Langchain modules\n",
"%pip install --quiet airbyte langchain langchain_openai langchainhub chromadb"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "3MtTFRoYg6bP"
},
"source": [
"## Load the Source Data using PyAirbyte\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "TEzG3kXznVtz"
},
"outputs": [],
"source": [
"import airbyte as ab\n",
"\n",
"# Configure and read from the source\n",
"read_result = ab.get_source(\n",
" \"source-github\",\n",
" config={\n",
" \"repositories\": [\"airbytehq/pyAirbyte\"],\n",
" \"credentials\": {\n",
" \"personal_access_token\": ab.get_secret(\"GITHUB_PERSONAL_ACCESS_TOKEN\")\n",
"\n",
" }\n",
" },\n",
" streams=[\"issues\"],\n",
").read()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "nM4mWwmkhLS_"
},
"source": [
"## Read a single record from stream to examine the fields"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "kj1u-znnuYKu"
},
"outputs": [],
"source": [
"first_record = next((record for record in read_result[\"issues\"]))\n",
"\n",
"# Print the fields list, followed by the first full record.\n",
"display(list(first_record.keys()))\n",
"display(first_record)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ZiGGcJdUjnsf"
},
"source": [
"## Use PyAirbyte `to_documents()` method on a dataset\n",
"\n",
"This demo uses a new `to_documents()` method, which accepts record property names which point to specific aspects of the document.\n",
"\n",
"When we set `render_metadata=True`, then metadata properties are also published to the markdown file. This option is helpful for small-ish documents, when passing the entire document to the LLM. It will be less helpful on long documents which are planned to be split into smaller chunks.\n",
"\n",
"Note: We use `rich` to print the documents as markdown, although that's not strictly necessary.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "TmA_bF6rjoXy"
},
"outputs": [],
"source": [
"import textwrap\n",
"from rich.console import Console\n",
"\n",
"# convert incoming stream data into documents\n",
"docs = list(read_result[\"issues\"].to_documents(\n",
" title_property=\"title\",\n",
" content_properties=[\"body\"],\n",
" metadata_properties=[\"state\", \"url\", \"number\"],\n",
" render_metadata=True,\n",
"))\n",
"\n",
"# print a doc comprising github issue\n",
"console = Console()\n",
"console.print(str(docs[10]))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0mkPWz8NkTbH"
},
"source": [
"## Use Langchain to build a RAG pipeline.\n",
"\n",
"Here, we just show a generic method of splitting docs. This and the following steps are copied from a generic LangChain tutorial."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "s1mFwmOGkNqe"
},
"outputs": [],
"source": [
"# Split the docs so they can be stored in vector database downstream\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=30)\n",
"\n",
"chunked_docs = splitter.split_documents(docs)\n",
"\n",
"print(f\"Created {len(chunked_docs)} document chunks.\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "SdRovb3c1eoH"
},
"source": [
"Now we can publish the chunks to a vector store. Ensure you have added your `OPENAI_API_KEY` to the Secrets tab on the left."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Lj7rXrF417tX"
},
"outputs": [],
"source": [
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"import os\n",
"\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"OPENAI_API_KEY\")\n",
"\n",
"# store into vector db\n",
"vectorstore = Chroma.from_documents(documents=chunked_docs, embedding=OpenAIEmbeddings())\n",
"print(\"Chunks successfully stored in vectorstore.\")\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "MSP8TgBf38VN"
},
"source": [
"Set up a RAG application using LangChain."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "kaamV9Q93v52"
},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain import hub\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"retriever = vectorstore.as_retriever()\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"rag_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"print(\"Langchain RAG pipeline set up successfully.\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "3eDeZ6-74DH7"
},
"source": [
"Ask a question."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "_haRAZyJ4Gjf"
},
"outputs": [],
"source": [
"console.print(rag_chain.invoke(\"Show me all documentation related issues, along with issue number, each on a new line.\"))"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/PyAirbyte_GA4_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "view-in-github"
},
"source": [
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "En5baAhvYE_y"
},
"source": [
"In this demo, we use PyAirbyte to extract data from Google Analytics 4, followed by a series of transformations and analyses to derive meaningful insights from this data.\n",
"\n",
"#### Prerequisites:\n",
"\n",
"- A Google Analytics account with access to a GA4 property. For details on configuring authentication credentials, refer to the [documentation](https://docs.airbyte.com/integrations/sources/google-analytics-data-api).\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8awBDcLvRW2g"
},
"source": [
"### Installing PyAirbyte\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xrhNw5kK5Lvx"
},
"outputs": [],
"source": [
"# Add virtual environment support in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# Install PyAirbyte\n",
"%pip install airbyte"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "mYsTAS1wRgO_"
},
"source": [
"### Load source data from Google Analytics 4 to local cache\n",
"\n",
"In this section, we establish a connection to GA4 via PyAirbyte. The source connector is configured with necessary parameters like the GA4 property ID, the service account JSON key, and the data period we are interested in. Check the [docs](https://docs.airbyte.com/integrations/sources/google-analytics-data-api) for more details on these parameters.\n",
"\n",
"After configuring the source connector, we perform a `check()` to ensure that the configuration is correct and the connection to the API is successful. Then, we list the available streams for this source and select the ones we are interested in syncing. In this case, we are only syncing the `pages` stream.\n",
"\n",
"Then, we proceed to read from the source into the internal DuckDB cache.\n",
"\n",
"Note: The credentials are retrieved securely using the `get_secret()` method. This will automatically locate a matching Google Colab secret or environment variable, ensuring they are not hard-coded into the notebook. Make sure to add your key to the Secrets section on the left.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 166
},
"id": "9em82J2Q5WzN",
"outputId": "f1b3fc28-cb4b-4f0f-d412-b702005c2e72"
},
"outputs": [
{
"data": {
"text/html": [
"
Connector 'source-google-analytics-data-api' installed successfully!\n",
"For more information, see the source-google-analytics-data-api documentation:\n",
"https://docs.airbyte.com/integrations/sources/google-analytics-data-api#reference\n",
"\n",
"
\n"
],
"text/plain": [
"Connector \u001b[32m'source-google-analytics-data-api'\u001b[0m installed successfully!\n",
"For more information, see the source-google-analytics-data-api documentation:\n",
"\u001b[4;94mhttps://docs.airbyte.com/integrations/sources/google-analytics-data-api#reference\u001b[0m\n",
"\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"
Connection check succeeded for `source-google-analytics-data-api`.\n",
"
Connector 'source-github' installed successfully!\n",
"For more information, see the source-github documentation:\n",
"https://docs.airbyte.com/integrations/sources/github#reference\n",
"\n",
"
\n"
],
"text/plain": [
"Connector \u001b[32m'source-github'\u001b[0m installed successfully!\n",
"For more information, see the source-github documentation:\n",
"\u001b[4;94mhttps://docs.airbyte.com/integrations/sources/github#reference\u001b[0m\n",
"\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"
Connection check succeeded for `source-github`.\n",
"
Completed `source-github` read operation at 22:16:34.\n",
"
\n"
],
"text/plain": [
"Completed `source-github` read operation at \u001b[1;92m22:16:34\u001b[0m.\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Select the streams we are interested in loading to cache\n",
"source.set_streams([\"pull_requests\", \"issues\", \"reviews\", \"stargazers\"])\n",
"\n",
"# Read into DuckDB local default cache\n",
"cache = ab.get_default_cache()\n",
"result = source.read(cache=cache)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "7iFNbPbPFB3q"
},
"source": [
"### Read again to sync changes\n",
"\n",
"The PyAirbyte Github source connector has the ability to read data incrementally by default, meaning that after the first successful data sync, only updates and new records will be synced in subsequent reads.\n",
"\n",
"For more information on sync modes for this source, you can refer to the [docs](https://docs.airbyte.com/integrations/sources/github).\n",
"\n",
"Let's read again, and see how no records will be loaded to cache.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 332
},
"id": "UOm1jvefGMrR",
"outputId": "a4fc014c-0d48-4186-c21b-1ba6e890f9ae"
},
"outputs": [
{
"data": {
"text/markdown": [
"## Read Progress\n",
"\n",
"Started reading at 22:17:30.\n",
"\n",
"Read **0** records over **3 seconds** (0.0 records / second).\n",
"\n",
"Finished reading at 22:17:33.\n",
"\n",
"Started finalizing streams at 22:17:33.\n",
"\n",
"Finalized **0** batches over 0 seconds.\n",
"\n",
"Completed 4 out of 4 streams:\n",
"\n",
" - reviews\n",
" - stargazers\n",
" - pull_requests\n",
" - issues\n",
"\n",
"\n",
"Completed writing at 22:17:33. Total time elapsed: 3 seconds\n",
"\n",
"\n",
"------------------------------------------------\n"
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"
Completed `source-github` read operation at 22:17:33.\n",
"
\n"
],
"text/plain": [
"Completed `source-github` read operation at \u001b[1;92m22:17:33\u001b[0m.\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"result = source.read(cache=cache)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "EZfsqKfJGg_Z"
},
"source": [
"### Read data from the cache\n",
"\n",
"Read from the already-written DuckDB cache into a pandas Dataframe. After the data is in the cache, you can read it without re-configuring or re-creating the source object. You can also select a specific stream to read from.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "E5-ADp0dGnGR"
},
"outputs": [],
"source": [
"# Read from the cache into a pandas Dataframe:\n",
"reviews = cache[\"reviews\"].to_pandas()\n",
"stargazers = cache[\"stargazers\"].to_pandas()\n",
"pull_requests = cache[\"pull_requests\"].to_pandas()\n",
"issues = cache[\"issues\"].to_pandas()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "EDs_pEAaGP5X"
},
"source": [
"### Transform and analyze the data\n",
"\n",
"Let's take our Github data and run some analysis with `pandas` and `matplotlib`.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 216
},
"id": "DkUfVobeGbMG",
"outputId": "5c0c1712-66e9-4a58-dc04-8c9709359a36"
},
"outputs": [
{
"data": {
"text/html": [
"\n",
"
Connector 'source-faker' installed successfully!\n",
"For more information, see the source-faker documentation:\n",
"https://docs.airbyte.com/integrations/sources/faker#reference\n",
"\n",
"
Connector 'source-shopify' installed successfully!\n",
"For more information, see the source-shopify documentation:\n",
"https://docs.airbyte.com/integrations/sources/shopify#reference\n",
"\n",
"
\n"
],
"text/plain": [
"Connector \u001b[32m'source-shopify'\u001b[0m installed successfully!\n",
"For more information, see the source-shopify documentation:\n",
"\u001b[4;94mhttps://docs.airbyte.com/integrations/sources/shopify#reference\u001b[0m\n",
"\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"
Connection check succeeded for `source-shopify`.\n",
"
\n"
],
"text/plain": [
"Connection check succeeded for `source-shopify`.\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import airbyte as ab\n",
"from google.colab import userdata\n",
"\n",
"# Create and configure the source connector:\n",
"source = ab.get_source(\n",
" \"source-shopify\",\n",
" install_if_missing=True,\n",
" config={\n",
" \"shop\": userdata.get(\"SHOP\"),\n",
" \"credentials\": {\n",
" \"auth_method\": \"api_password\",\n",
" \"api_password\": userdata.get(\"SHOP_API_PASSWORD\")\n",
" }\n",
" }\n",
")\n",
"\n",
"# Verify the config and creds by running `check()`:\n",
"source.check()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "-4OCBJd2tL4W"
},
"source": [
"## Load source data from Shopify to local DuckDB cache\n",
"\n",
"Now, we list the available streams for the source Shopify. This is optional but can be useful.\n",
"\n",
"Then, we select the streams we want to sync to the local cache, and proceed to `read()` to get the records."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "R4udSNyObVAv"
},
"outputs": [],
"source": [
"# List the available streams available for the Shopify source\n",
"source.get_available_streams()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 355
},
"id": "AlqnXX6ibZqX",
"outputId": "ee680a1b-20ea-4cb6-e59d-ac95827d1151"
},
"outputs": [
{
"data": {
"text/markdown": [
"## Read Progress\n",
"\n",
"Started reading at 22:20:53.\n",
"\n",
"Read **52** records over **16 seconds** (3.2 records / second).\n",
"\n",
"Wrote **52** records over 4 batches.\n",
"\n",
"Finished reading at 22:21:09.\n",
"\n",
"Started finalizing streams at 22:21:09.\n",
"\n",
"Finalized **4** batches over 1 seconds.\n",
"\n",
"Completed 4 out of 4 streams:\n",
"\n",
" - product_variants\n",
" - products\n",
" - customers\n",
" - collections\n",
"\n",
"\n",
"Completed writing at 22:21:10. Total time elapsed: 17 seconds\n",
"\n",
"\n",
"------------------------------------------------\n"
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"
Completed `source-shopify` read operation at 22:21:10.\n",
"
\n"
],
"text/plain": [
"Completed `source-shopify` read operation at \u001b[1;92m22:21:10\u001b[0m.\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Select the streams we are interested in loading to cache\n",
"source.select_streams([\"products\", \"product_variants\", \"collections\", \"customers\"])\n",
"\n",
"# Read into DuckDB local default cache\n",
"cache = ab.get_default_cache()\n",
"result = source.read(cache=cache)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ZKBR6MilSJta"
},
"source": [
"## Read data from the cache\n",
"\n",
"Now we can read from the already-written DuckDB cache into a pandas Dataframe. After the data is in the cache, we can read it without re-configuring or re-creating the source object. We can also select a stream to read from."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"id": "lFlveLjfGYof"
},
"outputs": [],
"source": [
"# Read from the cache into a pandas Dataframe:\n",
"products = cache[\"products\"].to_pandas()\n",
"product_variants = cache[\"product_variants\"].to_pandas()\n",
"collections = cache[\"collections\"].to_pandas()\n",
"customers = cache[\"customers\"].to_pandas()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "p7vX1_w4SYaF"
},
"source": [
"## Data analysis\n",
"\n",
"Let's analyze the data!\n",
"\n",
"Please note that since we used a Shopify sample store for this demo, the data is limited. If you have access to an actual store, you can select more streams and do a more extensive analysis here.\n",
"\n",
"### 1. Product Category and Variant Analysis\n",
"Analyze product types, variants, and their inventory levels."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
},
"id": "B8orH6pnmcy7",
"outputId": "13552705-0b38-4992-b58b-9b25cf963bdd"
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAjMAAAIFCAYAAAAncXxyAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA4KElEQVR4nO3de1wVdeL/8fdB4IAgiCgoieLi/RKal9JK8ZKKt7St1DVFzdUsb+lWuq33WsraotIs+36VLbFarbStvOelLF3QtNwUL6lRiVgmCOpRYH5/9OX8PIEXEJiZej0fj3nkfGbmnDdHyDcznznHYRiGIQAAAJvyMjsAAADA9aDMAAAAW6PMAAAAW6PMAAAAW6PMAAAAW6PMAAAAW6PMAAAAW6PMAAAAW6PMAAAAW6PMADbhcDg0a9Yss2P8bgwfPlxRUVEV8lxRUVEaPny4ez0pKUkOh0OpqakV8vyxsbGKjY2tkOcCygNlBrgGhf+4FC5+fn5q2LChxo0bpxMnTpgdr0y8/PLLSkpKuup+w4cP93gtLrdc+o+z2WbNmuWRrXLlyqpTp4769u2rJUuWyOVylcnzfP3115o1a5aOHj1aJo9XlqycDbhe3mYHAOxkzpw5qlevns6fP69PP/1UCxcu1EcffaS9e/eqcuXKZse7Li+//LKqV69+1RIyZswYdevWzb1+5MgRzZgxQ6NHj9btt9/uHo+Oji6vqKW2cOFCBQYGyuVy6fvvv9fatWs1cuRIJSYm6oMPPlBkZKR739dee00FBQUlevyvv/5as2fPVmxsbInO6qSlpcnLq3x/t7xStnXr1pXrcwPljTIDlEBcXJzatGkjSRo1apRCQ0P13HPPadWqVRo8eHCxx+Tm5iogIKAiY5ar9u3bq3379u711NRUzZgxQ+3bt9d9991nYrKru/vuu1W9enX3+owZM5ScnKxhw4bpnnvu0fbt293bfHx8yjWLYRg6f/68/P395XQ6y/W5rsbX19fU5weuF5eZgOvQpUsXSb+cnZB+uQQTGBiow4cPq1evXqpSpYqGDBki6ZdSM2XKFEVGRsrpdKpRo0Z69tln9esPrne5XHr44YdVo0YNValSRf369dN3331X5LkvN6ej8JLKry1dulTt2rVT5cqVFRISoo4dO7p/I4+KitJ///tfbdmyxX0pprRzKDZt2iSHw6H33nuvyLZly5bJ4XDo888/d38NgYGB+uabb9SjRw8FBAQoIiJCc+bMKfK6FBQUKDExUc2aNZOfn5/Cw8M1ZswY/fzzz6XKWWjIkCEaNWqUduzYofXr17vHi3t933rrLbVu3VpVqlRRUFCQWrRooRdeeEHSL5ci77nnHklS586d3a/j5s2bJf3yGvfp00dr165VmzZt5O/vr1dffdW9rbgzYmfPntWYMWMUGhqqoKAgDRs2rMjXe7m5VJc+5tWyFTdnJjMzU/fff7/Cw8Pl5+enmJgY/fOf//TY5+jRo3I4HHr22We1aNEiRUdHy+l0qm3btkpJSSn29QbKA2dmgOtw+PBhSVJoaKh7LC8vTz169NBtt92mZ599VpUrV5ZhGOrXr582bdqk+++/Xy1bttTatWv1yCOP6Pvvv9fzzz/vPn7UqFFaunSp/vSnP6lDhw76+OOP1bt37+vKOXv2bM2aNUsdOnTQnDlz5Ovrqx07dujjjz9W9+7dlZiYqPHjxyswMFCPP/64JCk8PLxUzxUbG6vIyEglJydrwIABHtuSk5MVHR3tcWYnPz9fPXv21C233KJ58+ZpzZo1mjlzpvLy8jRnzhz3fmPGjFFSUpJGjBihCRMm6MiRI5o/f76++OILbdu27brOpAwdOlSLFi3SunXrdMcddxS7z/r16zV48GB17dpVTz/9tCRp37592rZtmyZOnKiOHTtqwoQJevHFF/XXv/5VTZo0kST3f6VfLicNHjxYY8aM0Z///Gc1atToirnGjRunqlWratasWUpLS9PChQ
t17Ngxbd68udjCejnXku1S586dU2xsrA4dOqRx48apXr16Wr58uYYPH67Tp09r4sSJHvsvW7ZMZ86c0ZgxY+RwODRv3jzddddd+uabb8r9DBcgSTIAXNWSJUsMScaGDRuMkydPGunp6cZbb71lhIaGGv7+/sZ3331nGIZhxMfHG5KMqVOnehy/cuVKQ5LxxBNPeIzffffdhsPhMA4dOmQYhmHs3r3bkGQ8+OCDHvv96U9/MiQZM2fOdI/Fx8cbdevWLZJ15syZxqU/2gcPHjS8vLyMAQMGGPn5+R77FhQUuP/crFkzo1OnTtf8mhRKSUkxJBlLlixxj02bNs1wOp3G6dOn3WOZmZmGt7d3ka9BkjF+/HiPTL179zZ8fX2NkydPGoZhGJ988okhyUhOTvZ47jVr1hQ7/muFr0nh4/3azz//bEgyBgwY4JHt0td34sSJRlBQkJGXl3fZ51m+fLkhydi0aVORbXXr1jUkGWvWrCl2W3x8vHu98PutdevWxoULF9zj8+bNMyQZq1atco/9+vvico95pWydOnXy+LtPTEw0JBlLly51j124cMFo3769ERgYaGRnZxuGYRhHjhwxJBmhoaHGqVOn3PuuWrXKkGT8+9//LvJcQHngMhNQAt26dVONGjUUGRmpQYMGKTAwUO+9955uuOEGj/3Gjh3rsf7RRx+pUqVKmjBhgsf4lClTZBiGVq9e7d5PUpH9Jk2aVOrMK1euVEFBgWbMmFFkkmlJfrsviWHDhsnlcmnFihXusbffflt5eXnFzqsZN26cR6Zx48bpwoUL2rBhgyRp+fLlCg4O1h133KEff/zRvbRu3VqBgYHatGnTdeUNDAyUJJ05c+ay+1StWlW5ubkel6JKql69eurRo8c17z969GiPMxtjx46Vt7e3+/ukvHz00UeqWbOmxzwwHx8fTZgwQTk5OdqyZYvH/gMHDlRISIh7vXAi+DfffFOuOYFCXGYCSmDBggVq2LChvL29FR4erkaNGhUpCN7e3qpdu7bH2LFjxxQREaEqVap4jBee5j927Jj7v15eXkXuBLra5YgrOXz4sLy8vNS0adNSP0ZJNW7cWG3btlVycrLuv/9+Sb9cYrrllltUv359j329vLz0hz/8wWOsYcOGkuS+jfjgwYPKyspSWFhYsc+XmZl5XXlzcnIkqcjfz6UefPBB/etf/1JcXJxuuOEGde/eXffee6969ux5zc9Tr169EuVq0KCBx3pgYKBq1apV7rdXHzt2TA0aNCjyvf3r79dCderU8VgvLDbXO58JuFaUGaAE2rVr576b6XKcTme532YrXf6sSn5+frk/97UYNmyYJk6cqO+++04ul0vbt2/X/PnzS/VYBQUFCgsLU3JycrHba9SocT1RtXfvXkkqUrQuFRYWpt27d2vt2rVavXq1Vq9erSVLlmjYsGFFJsZejr+//3XlLImK/D6oVKlSsePGryZxA+WFy0xABahbt65++OGHIpcx9u/f795e+N+CggL3xOJCaWlpRR4zJCREp0+fLjL+69+ao6OjVVBQoK+//vqKGcv6ktOgQYNUqVIlvfnmm0pOTpaPj48GDhxYZL+CgoIilyMOHDggSe67iaKjo/XTTz/p1ltvVbdu3YosMTEx15X1jTfekKSrXgLy9fVV37599fLLL+vw4cMaM2aMXn/9dR06dEhS2b+GBw8e9FjPycnR8ePHPe6yKu774MKFCzp+/LjHWEmy1a1bVwcPHizyPju//n4FrIIyA1SAXr16KT8/v8iZieeff14Oh0NxcXGS5P7viy++6LFfYmJikceMjo5WVlaWvvzyS/fY8ePHi9wS3b9/f3l5eWnOnDlF/nG69DfngICAYstRaVWvXl1xcXFaunSpkpOT1bNnT4/3eLnUpa+LYRiaP3++fHx81LVrV0nSvffeq/z8fM2dO7fIsXl5edeVe9myZfqf//kftW/f3v18xfnpp5881r28vHTjjTdKkvsdhAvfT6isXs
dFixbp4sWL7vWFCxcqLy/P/X0i/fJ9sHXr1iLH/frMTEmy9erVSxkZGXr77bfdY3l5eXrppZcUGBioTp06lebLAcoNl5mACtC3b1917txZjz/+uI4ePaqYmBitW7dOq1at0qRJk9xzZFq2bKnBgwfr5ZdfVlZWljp06KCNGze6f/O/1KBBg/TYY49pwIABmjBhgs6ePauFCxeqYcOG2rVrl3u/+vXr6/HHH9fcuXN1++2366677pLT6VRKSooiIiKUkJAgSWrdurUWLlyoJ554QvXr11dYWJj7fXRKa9iwYbr77rslqdgiIkl+fn5as2aN4uPjdfPNN2v16tX68MMP9de//tV9+ahTp04aM2aMEhIStHv3bnXv3l0+Pj46ePCgli9frhdeeMH9PFeyYsUKBQYG6sKFC+53AN62bZtiYmK0fPnyKx47atQonTp1Sl26dFHt2rV17NgxvfTSS2rZsqV7LknLli1VqVIlPf3008rKypLT6VSXLl0uO9fnai5cuKCuXbvq3nvvVVpaml5++WXddttt6tevn0euBx54QH/84x91xx13aM+ePVq7dm2R4liSbKNHj9arr76q4cOHa+fOnYqKitKKFSu0bds2JSYmXnFuEWAKc2+mAuyh8FbZlJSUK+4XHx9vBAQEFLvtzJkzxsMPP2xEREQYPj4+RoMGDYxnnnnG4/ZowzCMc+fOGRMmTDBCQ0ONgIAAo2/fvkZ6enqxt+CuW7fOaN68ueHr62s0atTIWLp0aZFbswstXrzYaNWqleF0Oo2QkBCjU6dOxvr1693bMzIyjN69extVqlQxJF3zbdrF3ZpdyOVyGSEhIUZwcLBx7ty5ItsLX6/Dhw8b3bt3NypXrmyEh4cbM2fOLHIbuWEYxqJFi4zWrVsb/v7+RpUqVYwWLVoYjz76qPHDDz9cMWPha1K4+Pn5GbVr1zb69OljLF682Dh//nyx2S69NXvFihVG9+7djbCwMMPX19eoU6eOMWbMGOP48eMex7322mvGH/7wB6NSpUoet0LXrVvX6N27d7H5Lndr9pYtW4zRo0cbISEhRmBgoDFkyBDjp59+8jg2Pz/feOyxx4zq1asblStXNnr06GEcOnSoyGNeKduvb802DMM4ceKEMWLECKN69eqGr6+v0aJFiyJ/x4W3Zj/zzDNFvqbivl+B8uIwDGZoASgfeXl5ioiIUN++ffW///u/RbYPHz5cK1ascN9NBAClwZwZAOVm5cqVOnnypIYNG2Z2FAC/YcyZAVDmduzYoS+//FJz585Vq1atmDAKoFxxZgZAmVu4cKHGjh2rsLAwvf7662bHAfAbx5wZAABga5yZAQAAtkaZAQAAtvabnwBcUFCgH374QVWqVCm3TwgGAABlyzAMnTlzRhEREVf9vLvffJn54YcfFBkZaXYMAABQCunp6apdu/YV9/nNl5nCt91OT09XUFCQyWkAAMC1yM7OVmRk5DV9fMZvvswUXloKCgqizAAAYDPXMkWECcAAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWvM0OgPITNfVDsyOgAh19qrfZEQDAFJyZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtkaZAQAAtmZqmdm6dav69u2riIgIORwOrVy5ssg++/btU79+/RQcHKyAgAC1bdtW3377bcWHBQAAlmRqmcnNzVVMTIwWLFhQ7PbDhw/rtttuU+PGjbV582Z9+eWXmj59uvz8/Co4KQAAsCpvM588Li5OcXFxl93++OOPq1evXpo3b557LDo6uiKiAQAAm7DsnJmCggJ9+OGHatiwoXr06KGwsDDdfPPNxV6KupTL5V
J2drbHAgAAfrssW2YyMzOVk5Ojp556Sj179tS6des0YMAA3XXXXdqyZctlj0tISFBwcLB7iYyMrMDUAACgolm2zBQUFEiS7rzzTj388MNq2bKlpk6dqj59+uiVV1657HHTpk1TVlaWe0lPT6+oyAAAwASmzpm5kurVq8vb21tNmzb1GG/SpIk+/fTTyx7ndDrldDrLOx4AALAIy56Z8fX1Vdu2bZWWluYxfuDAAdWtW9ekVAAAwGpMPTOTk5OjQ4cOudePHDmi3bt3q1q1aqpTp44eeeQRDRw4UB07dlTnzp21Zs0a/fvf/9bmzZvNCw0AACzF1DKTmpqqzp07u9cnT54sSYqPj1dSUpIGDBigV155RQkJCZowYYIaNWqkd955R7fddptZkQEAgMWYWmZiY2NlGMYV9xk5cqRGjhxZQYkAAIDdWHbODAAAwLWgzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFsztcxs3bpVffv2VUREhBwOh1auXHnZfR944AE5HA4lJiZWWD4AAGB9ppaZ3NxcxcTEaMGCBVfc77333tP27dsVERFRQckAAIBdeJv55HFxcYqLi7viPt9//73Gjx+vtWvXqnfv3hWUDAAA2IWpZeZqCgoKNHToUD3yyCNq1qzZNR3jcrnkcrnc69nZ2eUVDwAAWIClJwA//fTT8vb21oQJE675mISEBAUHB7uXyMjIckwIAADMZtkys3PnTr3wwgtKSkqSw+G45uOmTZumrKws95Kenl6OKQEAgNksW2Y++eQTZWZmqk6dOvL29pa3t7eOHTumKVOmKCoq6rLHOZ1OBQUFeSwAAOC3y7JzZoYOHapu3bp5jPXo0UNDhw7ViBEjTEoFAACsxtQyk5OTo0OHDrnXjxw5ot27d6tatWqqU6eOQkNDPfb38fFRzZo11ahRo4qOCgAALMrUMpOamqrOnTu71ydPnixJio+PV1JSkkmpAACAnZhaZmJjY2UYxjXvf/To0fILAwAAbMmyE4ABAACuBWUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYGmUGAADYmqllZuvWrerbt68iIiLkcDi0cuVK97aLFy/qscceU4sWLRQQEKCIiAgNGzZMP/zwg3mBAQCA5ZhaZnJzcxUTE6MFCxYU2Xb27Fnt2rVL06dP165du/Tuu+8qLS1N/fr1MyEpAACwKm8znzwuLk5xcXHFbgsODtb69es9xubPn6927drp22+/VZ06dSoiIgAAsDhTy0xJZWVlyeFwqGrVqpfdx+VyyeVyudezs7MrIBkAADCLbSYAnz9/Xo899pgGDx6soKCgy+6XkJCg4OBg9xIZGVmBKQEAQEWzRZm5ePGi7r33XhmGoYULF15x32nTpikrK8u9pKenV1BKAABgBstfZiosMseOHdPHH398xbMykuR0OuV0OisoHQAAMJuly0xhkTl48KA2bdqk0NBQsyMBAACLMbXM5OTk6NChQ+71I0eOaPfu3apWrZpq1aqlu+++W7t27dIHH3yg/Px8ZWRkSJKqVasmX19fs2IDAAALMbXMpKamqnPnzu71yZMnS5Li4+M1a9Ysvf/++5Kkli1behy3adMmxc
bGVlRMAABgYaaWmdjYWBmGcdntV9oGAAAg2eRuJgAAgMuhzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFsztcxs3bpVffv2VUREhBwOh1auXOmx3TAMzZgxQ7Vq1ZK/v7+6deumgwcPmhMWAABYkqllJjc3VzExMVqwYEGx2+fNm6cXX3xRr7zyinbs2KGAgAD16NFD58+fr+CkAADAqrzNfPK4uDjFxcUVu80wDCUmJupvf/ub7rzzTknS66+/rvDwcK1cuVKDBg2qyKgAAMCiLDtn5siRI8rIyFC3bt3cY8HBwbr55pv1+eefm5gMAABYialnZq4kIyNDkhQeHu4xHh4e7t5WHJfLJZfL5V7Pzs4un4AAAMASLHtmprQSEhIUHBzsXiIjI82OBAAAypFly0zNmjUlSSdOnPAYP3HihHtbcaZNm6asrCz3kp6eXq45AQCAuSxbZurVq6eaNWtq48aN7rHs7Gzt2LFD7du3v+xxTqdTQUFBHgsAAPjtMnXOTE5Ojg4dOuReP3LkiHbv3q1q1aqpTp06mjRpkp544gk1aNBA9erV0/Tp0xUREaH+/fubFxoAAFhKqcrMH/7wB6WkpCg0NNRj/PTp07rpppv0zTffXNPjpKamqnPnzu71yZMnS5Li4+OVlJSkRx99VLm5uRo9erROnz6t2267TWvWrJGfn19pYgMAgN8gh2EYRkkP8vLyUkZGhsLCwjzGT5w4oTp16njcTWS27OxsBQcHKysr63d3ySlq6odmR0AFOvpUb7MjAECZKcm/3yU6M/P++++7/7x27VoFBwe71/Pz87Vx40ZFRUWVLC0AAMB1KFGZKZyr4nA4FB8f77HNx8dHUVFR+sc//lFm4QAAAK6mRGWmoKBA0i93GqWkpKh69erlEgoAAOBalWoC8JEjR8o6BwAAQKmU+tbsjRs3auPGjcrMzHSfsSm0ePHi6w4GAABwLUpVZmbPnq05c+aoTZs2qlWrlhwOR1nnAgAAuCalKjOvvPKKkpKSNHTo0LLOAwAAUCKl+jiDCxcuqEOHDmWdBQAAoMRKVWZGjRqlZcuWlXUWAACAEivVZabz589r0aJF2rBhg2688Ub5+Ph4bH/uuefKJBwAAMDVlKrMfPnll2rZsqUkae/evR7bmAwMAAAqUqnKzKZNm8o6BwAAQKmUas4MAACAVZTqzEznzp2veDnp448/LnUgAACAkihVmSmcL1Po4sWL2r17t/bu3VvkAygBAADKU6nKzPPPP1/s+KxZs5STk3NdgQAAAEqiTOfM3HfffXwuEwAAqFBlWmY+//xz+fn5leVDAgAAXFGpLjPdddddHuuGYej48eNKTU3V9OnTyyQYAADAtShVmQkODvZY9/LyUqNGjTRnzhx17969TIIBAABci1KVmSVLlpR1DgAAgFIpVZkptHPnTu3bt0+S1KxZM7Vq1apMQgEAAFyrUpWZzMxMDRo0SJs3b1bVqlUlSadPn1bnzp311ltvqUaNGmWZEQAA4LJKdTfT+PHjdebMGf33v//VqVOndOrUKe3du1fZ2dmaMGFCWWcEAAC4rFKdmVmzZo02bNigJk2auMeaNm2qBQsWMAEYAABUqFKdmSkoKJCPj0+RcR8fHxUUFFx3KAAAgGtVqjLTpUsXTZw4UT/88IN77Pvvv9fDDz+srl27llk4AACAqylVmZk/f76ys7MVFRWl6OhoRUdHq169esrOztZLL71U1hkBAAAuq1RzZiIjI7Vr1y5t2LBB+/fvlyQ1adJE3bp1K9NwAAAAV1OiMzMff/yxmjZtquzsbD
kcDt1xxx0aP368xo8fr7Zt26pZs2b65JNPyisrAABAESUqM4mJifrzn/+soKCgItuCg4M1ZswYPffcc2UWDgAA4GpKVGb27Nmjnj17XnZ79+7dtXPnzusOVSg/P1/Tp09XvXr15O/vr+joaM2dO1eGYZTZcwAAAHsr0ZyZEydOFHtLtvvBvL118uTJ6w5V6Omnn9bChQv1z3/+U82aNVNqaqpGjBih4OBg3pwPAABIKmGZueGGG7R3717Vr1+/2O1ffvmlatWqVSbBJOmzzz7TnXfeqd69e0uSoqKi9Oabb+o///lPmT0HAACwtxJdZurVq5emT5+u8+fPF9l27tw5zZw5U3369CmzcB06dNDGjRt14MABSb9c5vr0008VFxd32WNcLpeys7M9FgAA8NtVojMzf/vb3/Tuu++qYcOGGjdunBo1aiRJ2r9/vxYsWKD8/Hw9/vjjZRZu6tSpys7OVuPGjVWpUiXl5+frySef1JAhQy57TEJCgmbPnl1mGQAAgLWVqMyEh4frs88+09ixYzVt2jT3RFyHw6EePXpowYIFCg8PL7Nw//rXv5ScnKxly5apWbNm2r17tyZNmqSIiAjFx8cXe8y0adM0efJk93p2drYiIyPLLBMAALCWEr9pXt26dfXRRx/p559/1qFDh2QYhho0aKCQkJAyD/fII49o6tSpGjRokCSpRYsWOnbsmBISEi5bZpxOp5xOZ5lnAQAA1lSqdwCWpJCQELVt27YssxRx9uxZeXl5TuupVKkSH2YJAADcSl1mKkLfvn315JNPqk6dOmrWrJm++OILPffccxo5cqTZ0QAAgEVYusy89NJLmj59uh588EFlZmYqIiJCY8aM0YwZM8yOBgAALMLSZaZKlSpKTExUYmKi2VEAAIBFleh9ZgAAAKyGMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGyNMgMAAGzN8mXm+++/13333afQ0FD5+/urRYsWSk1NNTsWAACwCG+zA1zJzz//rFtvvVWdO3fW6tWrVaNGDR08eFAhISFmRwMAABZh6TLz9NNPKzIyUkuWLHGP1atXz8REAADAaix9men9999XmzZtdM899ygsLEytWrXSa6+9dsVjXC6XsrOzPRYAAPDbZeky880332jhwoVq0KCB1q5dq7Fjx2rChAn65z//edljEhISFBwc7F4iIyMrMDEAAKhoDsMwDLNDXI6vr6/atGmjzz77zD02YcIEpaSk6PPPPy/2GJfLJZfL5V7Pzs5WZGSksrKyFBQUVO6ZrSRq6odmR0AFOvpUb7MjAECZyc7OVnBw8DX9+23pMzO1atVS06ZNPcaaNGmib7/99rLHOJ1OBQUFeSwAAOC3y9Jl5tZbb1VaWprH2IEDB1S3bl2TEgEAAKuxdJl5+OGHtX37dv3973/XoUOHtGzZMi1atEgPPfSQ2dEAAIBFWLrMtG3bVu+9957efPNNNW/eXHPnzlViYqKGDBlidjQAAGARln6fGUnq06eP+vTpY3YMAABgUZY+MwMAAHA1lBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrlBkAAGBrtiozTz31lBwOhyZNmmR2FAAAYBG2KTMpKSl69d
VXdeONN5odBQAAWIgtykxOTo6GDBmi1157TSEhIWbHAQAAFmKLMvPQQw+pd+/e6tat21X3dblcys7O9lgAAMBvl7fZAa7mrbfe0q5du5SSknJN+yckJGj27NnlnAoAAFiFpc/MpKena+LEiUpOTpafn981HTNt2jRlZWW5l/T09HJOCQAAzGTpMzM7d+5UZmambrrpJvdYfn6+tm7dqvnz58vlcqlSpUoexzidTjmdzoqOCgAATGLpMtO1a1d99dVXHmMjRoxQ48aN9dhjjxUpMgAA4PfH0mWmSpUqat68ucdYQECAQkNDi4wDAIDfJ0vPmQEAALgaS5+ZKc7mzZvNjgAAACyEMzMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWKDMAAMDWLF9mEhIS1LZtW1WpUkVhYWHq37+/0tLSzI4FAAAswvJlZsuWLXrooYe0fft2rV+/XhcvXlT37t2Vm5trdjQAAGAB3mYHuJo1a9Z4rCclJSksLEw7d+5Ux44dTUoFAACswvJl5teysrIkSdWqVSt2u8vlksvlcq9nZ2dXSC4AAGAOy19mulRBQYEmTZqkW2+9Vc2bNy92n4SEBAUHB7uXyMjICk4JAAAqkq3KzEMPPaS9e/fqrbfeuuw+06ZNU1ZWlntJT0+vwIQAAKCi2eYy07hx4/TBBx9o69atql279mX3czqdcjqdFZgMAACYyfJlxjAMjR8/Xu+99542b96sevXqmR0JAABYiOXLzEMPPaRly5Zp1apVqlKlijIyMiRJwcHB8vf3NzkdAAAwm+XnzCxcuFBZWVmKjY1VrVq13Mvbb79tdjQAAGABlj8zYxiG2REAAICFWf7MDAAAwJVQZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK1RZgAAgK15mx0AAFByUVM/NDsCKtDRp3qbHcHSODMDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABsjTIDAABszRZlZsGCBYqKipKfn59uvvlm/ec//zE7EgAAsAjLl5m3335bkydP1syZM7Vr1y7FxMSoR48eyszMNDsaAACwAMuXmeeee05//vOfNWLECDVt2lSvvPKKKleurMWLF5sdDQAAWICly8yFCxe0c+dOdevWzT3m5eWlbt266fPPPzcxGQAAsApvswNcyY8//qj8/HyFh4d7jIeHh2v//v3FHuNyueRyudzrWVlZkqTs7OzyC2pRBa6zZkdABfo9fo//nvHz/fvye/z5LvyaDcO46r6WLjOlkZCQoNmzZxcZj4yMNCENUHGCE81OAKC8/J5/vs+cOaPg4OAr7mPpMlO9enVVqlRJJ06c8Bg/ceKEatasWewx06ZN0+TJk93rBQUFOnXqlEJDQ+VwOMo1L8yXnZ2tyMhIpaenKygoyOw4AMoQP9+/L4Zh6MyZM4qIiLjqvpYuM76+vmrdurU2btyo/v37S/qlnGzcuFHjxo0r9hin0ymn0+kxVrVq1XJOCqsJCgrif3bAbxQ/378fVzsjU8jSZUaSJk+erPj4eLVp00bt2rVTYmKicnNzNWLECLOjAQAAC7B8mRk4cKBOnjypGTNmKCMjQy1bttSaNWuKTAoGAAC/T5YvM5I0bty4y15WAi7ldDo1c+bMIpcaAdgfP9+4HIdxLfc8AQAAWJSl3zQPAADgai
gzAADA1igzAADA1igzAADA1igzAADA1igzAADA1mzxPjMAgN+PVq1aXfNn6e3atauc08AOKDMAAEsp/Cw+STp//rxefvllNW3aVO3bt5ckbd++Xf/973/14IMPmpQQVsOb5gEALGvUqFGqVauW5s6d6zE+c+ZMpaena/HixSYlg5VQZgAAlhUcHKzU1FQ1aNDAY/zgwYNq06aNsrKyTEoGK2ECMADAsvz9/bVt27Yi49u2bZOfn58JiWBFzJkBAFjWpEmTNHbsWO3atUvt2rWTJO3YsUOLFy/W9OnTTU4Hq+AyEwDA0v71r3/phRde0L59+yRJTZo00cSJE3XvvfeanAxWQZkBAFhSXl6e/v73v2vkyJGqXbu22XFgYZQZAIBlBQYGau/evYqKijI7CiyMCcAAAMvq2rWrtmzZYnYMWBwTgAEAlhUXF6epU6fqq6++UuvWrRUQEOCxvV+/fiYlg5VwmQkAYFleXpe/gOBwOJSfn1+BaWBVlBkAAGBrzJkBAAC2xpwZAICl5ebmasuWLfr222914cIFj20TJkwwKRWshMtMAADL+uKLL9SrVy+dPXtWubm5qlatmn788UdVrlxZYWFh+uabb8yOCAvgMhMAwLIefvhh9e3bVz///LP8/f21fft2HTt2TK1bt9azzz5rdjxYBGdmAACWVbVqVe3YsUONGjVS1apV9fnnn6tJkybasWOH4uPjtX//frMjwgI4MwMAsCwfHx/37dlhYWH69ttvJUnBwcFKT083MxoshAnAAADLatWqlVJSUtSgQQN16tRJM2bM0I8//qg33nhDzZs3NzseLILLTAAAy0pNTdWZM2fUuXNnZWZmatiwYfrss8/UoEEDLV68WDExMWZHhAVQZgAAgK1xmQkAYHknT55UWlqaJKlx48aqXr26yYlgJUwABgBYVm5urkaOHKmIiAh17NhRHTt2VK1atXT//ffr7NmzZseDRVBmAACWNXnyZG3ZskXvv/++Tp8+rdOnT2vVqlXasmWLpkyZYnY8WARzZgAAllW9enWtWLFCsbGxHuObNm3Svffeq5MnT5oTDJbCmRkAgGWdPXtW4eHhRcbDwsK4zAQ3zswAACyra9euCg0N1euvvy4/Pz9J0rlz5xQfH69Tp05pw4YNJieEFVBmAACWtXfvXvXo0UMul8v9njJ79uyRn5+f1q5dq2bNmpmcEFZAmQEAWNrZs2eVnJzs/hymJk2aaMiQIfL39zc5GayCMgMAAGyNN80DAFhaWlqaXnrpJe3bt0/SL2dmxo0bp8aNG5ucDFbB3UwAAMt655131Lx5c+3cuVMxMTGKiYnRrl271KJFC73zzjtmx4NFcJkJAGBZ0dHRGjJkiObMmeMxPnPmTC1dulSHDx82KRmshDIDALCsypUr68svv1T9+vU9xg8ePKiYmBjeawaSuMwEALCw2NhYffLJJ0XGP/30U91+++0mJIIVMQEYAGAp77//vvvP/fr102OPPaadO3fqlltukSRt375dy5cv1+zZs82KCIvhMhMAwFK8vK7tooHD4VB+fn45p4EdUGYAAICtMWcGAGBZ58+fNzsCbIAzMwAAy/Lz81O7du3UqVMnxcbGqkOHDnyMAYqgzAAALOvTTz/V1q1btXnzZn322WfKy8tTmzZt3OXmjjvuMDsiLIAyAwCwhby8PKWkpOjVV19VcnKyCgoKmAAMSdyaDQCwuAMHDmjz5s3uxeVyqU+fPoqNjTU7GiyCMzMAAMu64YYbdO7cOcXGxio2NladOnXSjTfeKIfDYXY0WAh3MwEALKtGjRo6e/asMjIylJGRoRMnTujcuXNmx4LFcGYGAGBpp0+f1tatW7VlyxZt2bJFX3/9tVq2bKnOnTvrySefNDseLIAyAwCwhZ9++kmbN2/WqlWr9OabbzIBGG6UGQCAZb377rvuib9ff/21qlWrpttuu809fyYmJsbsiLAAygwAwLLCwsLUsWNHd3lp0aKF2ZFgQZQZAA
Bga7zPDADA0vLz87Vy5Urt27dPktS0aVPdeeedqlSpksnJYBWcmQEAWNahQ4fUq1cvff/992rUqJEkKS0tTZGRkfrwww8VHR1tckJYAWUGAGBZvXr1kmEYSk5OVrVq1ST9clfTfffdJy8vL3344YcmJ4QVUGYAAJYVEBCg7du3F5n4u2fPHt16663KyckxKRmshHcABgBYltPp1JkzZ4qM5+TkyNfX14REsCLKDADAsvr06aPRo0drx44dMgxDhmFo+/bteuCBB9SvXz+z48EiuMwEALCs06dPKz4+Xv/+97/l4+MjScrLy1O/fv2UlJSk4OBgkxPCCigzAADLO3jwoPbv3y9JatKkierXr29yIlgJZQYAANgab5oHALCs/Px8JSUlaePGjcrMzFRBQYHH9o8//tikZLASygwAwLImTpyopKQk9e7dW82bN5fD4TA7EiyIy0wAAMuqXr26Xn/9dfXq1cvsKLAwbs0GAFiWr68vk31xVZQZAIBlTZkyRS+88IK4iIAr4TITAMCyBgwYoE2bNqlatWpq1qyZ+71mCr377rsmJYOVMAEYAGBZVatW1YABA8yOAYvjzAwAwLLOnTungoICBQQESJKOHj2qlStXqkmTJurRo4fJ6WAVzJkBAFjWnXfeqTfeeEPSLx9tcMstt+gf//iH+vfvr4ULF5qcDlZBmQEAWNauXbt0++23S5JWrFih8PBwHTt2TK+//rpefPFFk9PBKigzAADLOnv2rKpUqSJJWrdune666y55eXnplltu0bFjx0xOB6ugzAAALKt+/fpauXKl0tPTtXbtWnXv3l2SlJmZqaCgIJPTwSooMwAAy5oxY4b+8pe/KCoqSjfffLPat28v6ZezNK1atTI5HayCu5kAAJaWkZGh48ePKyYmRl5ev/wO/p///EdBQUFq3LixyelgBZQZAABga1xmAgAAtkaZAQAAtkaZAQAAtkaZAWCa4cOHq3///mbHAGBzlBkAHoYPHy6HwyGHwyFfX1/Vr19fc+bMUV5entnRriopKUlVq1a94j6xsbHur6+4JTY2tkKyAig7fGo2gCJ69uypJUuWyOVy6aOPPtJDDz0kHx8fTZs2rci+Fy5ckK+vrwkpS+fdd9/VhQsXJEnp6elq166dNmzYoGbNmkmSrb4WAL/gzAyAIpxOp2rWrKm6detq7Nix6tatm95//31J///S0JNPPqmIiAg1atRIkvTVV1+pS5cu8vf3V2hoqEaPHq2cnBz3Y+bn52vy5MmqWrWqQkND9eijj+rX7wwRFRWlxMREj7GWLVtq1qxZ7vXTp09rzJgxCg8Pl5+fn5o3b64PPvhAmzdv1ogRI5SVleU+y3LpcYWqVaummjVrqmbNmqpRo4YkKTQ0VDVr1tSf/vQnzZgxw2P/kydPytfXVxs3bnRnnDt3rgYPHqyAgADdcMMNWrBggccxp0+f1qhRo1SjRg0FBQWpS5cu2rNnz7X/BQAoEcoMgKvy9/d3n82QpI0bNyotLU3r16/XBx98oNzcXPXo0UMhISFKSUnR8uXLtWHDBo0bN859zD/+8Q8lJSVp8eLF+vTTT3Xq1Cm99957JcpRUFCguLg4bdu2TUuXLtXXX3+tp556SpUqVVKHDh2UmJiooKAgHT9+XMePH9df/vKXEj3+qFGjtGzZMrlcLvfY0qVLdcMNN6hLly7usWeeeUYxMTH64osvNHXqVE2cOFHr1693b7/nnnuUmZmp1atXa+fOnbrpppvUtWtXnTp1qkR5AFwjAwAuER8fb9x5552GYRhGQUGBsX79esPpdBp/+ctf3NvDw8MNl8vlPmbRokVGSEiIkZOT4x778MMPDS8vLyMjI8MwDMOoVauWMW/ePPf2ixcvGrVr13Y/l2EYRt26dY3nn3/eI09MTIwxc+ZMwzAMY+3atYaXl5eRlpZWbPYlS5YYwcHB1/y1HjlyxJBkfPHFF4ZhGMa5c+eMkJAQ4+2333bvc+ONNxqzZs3yyN
izZ0+Pxxk4cKARFxdnGIZhfPLJJ0ZQUJBx/vx5j32io6ONV1999ZqzAbh2nJkBUMQHH3ygwMBA+fn5KS4uTgMHDvS4ZNOiRQuPuSX79u1TTEyMAgIC3GO33nqrCgoKlJaWpqysLB0/flw333yze7u3t7fatGlToly7d+9W7dq11bBhw9J/cVfg5+enoUOHavHixZKkXbt2ae/evRo+fLjHfoWfD3Tp+r59+yRJe/bsUU5OjkJDQxUYGOhejhw5osOHD5dLbuD3jgnAAIro3LmzFi5cKF9fX0VERMjb2/N/FZeWlrLk5eVVZB7NxYsX3X/29/cvl+e91KhRo9SyZUt99913WrJkibp06aK6dete8/E5OTmqVauWNm/eXGTb1e60AlA6nJkBUERAQIDq16+vOnXqFCkyxWnSpIn27Nmj3Nxc99i2bdvk5eWlRo0aKTg4WLVq1dKOHTvc2/Py8rRz506Px6lRo4aOHz/uXs/OztaRI0fc6zfeeKO+++47HThwoNgcvr6+ys/Pv+avszgtWrRQmzZt9Nprr2nZsmUaOXJkkX22b99eZL1JkyaSpJtuukkZGRny9vZW/fr1PZbq1atfVzYAxaPMALhuQ4YMkZ+fn+Lj47V3715t2rRJ48eP19ChQxUeHi5Jmjhxop566imtXLlS+/fv14MPPqjTp097PE6XLl30xhtv6JNPPtFXX32l+Ph4VapUyb29U6dO6tixo/74xz9q/fr1OnLkiFavXq01a9ZI+uVOo5ycHG3cuFE//vijzp49W6qvZ9SoUXrqqadkGIYGDBhQZPu2bds0b948HThwQAsWLNDy5cs1ceJESVK3bt3Uvn179e/fX+vWrdPRo0f12Wef6fHHH1dqamqp8gC4MsoMgOtWuXJlrV27VqdOnVLbtm119913q2vXrpo/f757nylTpmjo0KGKj49X+/btVaVKlSJFYdq0aerUqZP69Omj3r17q3///oqOjvbY55133lHbtm01ePBgNW3aVI8++qj7bEyHDh30wAMPaODAgapRo4bmzZtXqq9n8ODB8vb21uDBg+Xn51dk+5QpU5SamqpWrVrpiSee0HPPPacePXpIkhwOhz766CN17NhRI0aMUMOGDTVo0CAdO3bMXewAlC2H8esL1ADwO3f06FFFR0crJSVFN910k8e2qKgoTZo0SZMmTTInHIAimAAMAP/n4sWL+umnn/S3v/1Nt9xyS5EiA8CauMwEAP9n27ZtqlWrllJSUvTKK6+YHQfANeIyEwAAsDXOzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFujzAAAAFv7f92Y8Y5sQlI+AAAAAElFTkSuQmCC",
"text/plain": [
"
"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Product Variants Inventory Analysis\n",
"product_variants.groupby('product_id')['inventory_quantity'].sum().plot(kind='bar')\n",
"plt.title('Total Inventory Quantity by Product')\n",
"plt.xlabel('Product ID')\n",
"plt.ylabel('Total Inventory Quantity')\n",
"plt.show()"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/PyAirbyte_Snowflake_Cortex_Github.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "n3fUVL_19YGb"
},
"source": [
"# Storing vector data into Snowflake using PyAirbyte, Snowflake Cortex\n",
"\n",
"In this notebook, we'll illustrate how to load data from an Airbyte source into Snowflake using PyAirbyte, and afterwards convert the stream data into vectors. In this example, we've used source-github and the 'issues' stream, but you can replace the source according to your requirements.\n",
"\n",
"## Prerequisites\n",
"\n",
"1. **GitHub Access Token**:\n",
" - Follow the instructions in the [Github Connector Docs](https://docs.airbyte.com/integrations/sources/github) to set up your github and get api_token.\n",
"\n",
"2. **Snowflake**:\n",
" - To set up snowflake, follow these [instructions](https://docs.airbyte.com/integrations/destinations/snowflake#login-and-password).\n",
"\n",
"\n",
"## Install PyAirbyte and other dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "OhMxXpazUzBX"
},
"outputs": [],
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# First, we need to install the necessary libraries.\n",
"!pip3 install airbyte snowflake-connector-python"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "KqfoQD2dAJtd"
},
"source": [
"# Setup Source Github"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "QwV29bK4VbL4"
},
"outputs": [],
"source": [
"import airbyte as ab\n",
"\n",
"source = ab.get_source(\n",
" \"source-github\",\n",
" config={\n",
" \"repositories\": [\"airbytehq/quickstarts\"],\n",
" \"credentials\": {\n",
" \"personal_access_token\": ab.get_secret(\"GITHUB_API_KEY\"),\n",
" },\n",
" },\n",
")\n",
"source.check()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "9avgcWxs9Ura"
},
"source": [
"Reads the data from the selected issues stream, extracting the GitHub issues data for further processing."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "s8VFZEK8XbA0"
},
"outputs": [],
"source": [
"source.get_available_streams()\n",
"source.select_streams([\"issues\"]) # we are only interested in issues stream\n",
"read_result = source.read()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "MzeTFZ58YVEo"
},
"outputs": [],
"source": [
"issues = [doc for doc in read_result[\"issues\"].to_documents()] # Will be useful for vector_embedding\n",
"issue_df = read_result['issues'].to_pandas() # Converting data to pandas frame\n",
"print(str(issues[5]))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6fivYEfdBafd"
},
"source": [
"# Loading data into Snowflake\n",
"It uses the snowflake.connector module to connect to Snowflake with the provided credentials fetched from secrets. Make sure to add your keys to the Secrets section on the left."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "2EM-oHR3cXXc"
},
"outputs": [],
"source": [
"from snowflake import connector\n",
"conn = connector.connect(\n",
" account=ab.get_secret(\"SNOWFLAKE_HOST\"),\n",
" role=ab.get_secret(\"SNOWFLAKE_ROLE\"),\n",
" warehouse=ab.get_secret(\"SNOWFLAKE_WAREHOUSE\"),\n",
" database=ab.get_secret(\"SNOWFLAKE_DATABASE\"),\n",
" schema=ab.get_secret(\"SNOWFLAKE_SCHEMA\"),\n",
" user=ab.get_secret(\"SNOWFLAKE_USERNAME\"),\n",
" password=ab.get_secret(\"SNOWFLAKE_PASSWORD\"),\n",
" )\n",
"cur = conn.cursor()\n",
"\n",
"print(ab.get_secret(\"SNOWFLAKE_SCHEMA\"))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "85uLgQ7JBgoI"
},
"source": [
"Define a function that creates a Snowflake table based on the schema of a Pandas DataFrame, then use it to create a github_issue table in Snowflake from the issue_df DataFrame."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "X96O3Is-i3v4"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"def create_table_from_dataframe(conn, df, table_name):\n",
" cursor = conn.cursor()\n",
" database = ab.get_secret('SNOWFLAKE_DATABASE')\n",
" print(database)\n",
" cursor.execute(f'USE DATABASE {database}')\n",
" schema_name = ab.get_secret('SNOWFLAKE_SCHEMA')\n",
" cursor.execute(f'USE SCHEMA {schema_name}')\n",
" columns = []\n",
" for column, dtype in zip(df.columns, df.dtypes):\n",
" if pd.api.types.is_integer_dtype(dtype):\n",
" snowflake_type = 'INTEGER'\n",
" elif pd.api.types.is_float_dtype(dtype):\n",
" snowflake_type = 'FLOAT'\n",
" elif pd.api.types.is_bool_dtype(dtype):\n",
" snowflake_type = 'BOOLEAN'\n",
" elif pd.api.types.is_datetime64_any_dtype(dtype):\n",
" snowflake_type = 'TIMESTAMP'\n",
" else:\n",
" snowflake_type = 'STRING'\n",
"\n",
" columns.append(f'\"{column}\" {snowflake_type}')\n",
"\n",
" create_table_sql = f'CREATE TABLE {table_name} ({\", \".join(columns)});'\n",
" cursor.execute(create_table_sql)\n",
"\n",
"# Example usage:\n",
"create_table_from_dataframe(conn, issue_df, 'github_issue') # Keep table name according to your requirments\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "RePkO21hCSZx"
},
"source": [
"upload_dataframe_to_snowflake that uses Snowflake's pandas integration (write_pandas) to upload a Pandas DataFrame (issue_df) into a Snowflake table ('GITHUB_ISSUE')."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Ku61-vxBt7Xh"
},
"outputs": [],
"source": [
"from snowflake.connector.pandas_tools import write_pandas\n",
"def upload_dataframe_to_snowflake(conn, df, table_name):\n",
" success, nchunks, nrows, _ = write_pandas(conn, df, table_name)\n",
" if success:\n",
" print(f\"Successfully inserted {nrows} rows into {table_name}.\")\n",
" else:\n",
" print(\"Failed to insert data.\")\n",
"\n",
"upload_dataframe_to_snowflake(conn, issue_df, 'GITHUB_ISSUE') # Remember to use table name in uppercase\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ppxeChRQCmm8"
},
"source": [
"# Vector Embedding the Data\n",
"Now we utilize the RecursiveCharacterTextSplitter from langchain.text_splitter to segment documents (issues) into smaller chunks based on specified parameters (chunk_size and chunk_overlap).\n",
"\n",
"Then we organize the chunked documents into a Pandas DataFrame (df) with columns for page content (PAGE_CONTENT), metadata (META), and type (TYPE), ensuring all data is represented as strings for consistency and analysis."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "5ti5uUpsu2oU"
},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"import pandas as pd\n",
"\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)\n",
"chunked_docs = splitter.split_documents(issues)\n",
"print(f\"Created {len(chunked_docs)} document chunks.\")\n",
"\n",
"\n",
"for doc in chunked_docs:\n",
" for md in doc.metadata:\n",
" doc.metadata[md] = str(doc.metadata[md])\n",
"\n",
"df = pd.DataFrame(chunked_docs, columns=['PAGE_CONTENT','META','TYPE']) # please use uppercase\n",
"# Convert all columns to string\n",
"df = df.astype(str)\n",
"print(df.head(3))\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "wfSflgGwDkjN"
},
"source": [
"Now we establish a new table for storing vector embedded data. First, we create a data and store chunked data, and then we vector embed the data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "qFK12XSiNej_"
},
"outputs": [],
"source": [
"create_table_from_dataframe(conn, df, 'vector_github_issues')\n",
"upload_dataframe_to_snowflake(conn, df, 'VECTOR_GITHUB_ISSUES') #use uppercase"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "1Wjq35UMECIp"
},
"source": [
"Now, using Snowflake Cortex, we will turn the page content column into embedding and store them in the embedding column. Different embedding models are available [here](https://docs.snowflake.com/en/sql-reference/functions/embed_text_1024-snowflake-cortex)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "vHHZ5fn7Rcs2"
},
"outputs": [],
"source": [
"\n",
"cur = conn.cursor()\n",
"\n",
"# Step 1: Add the new column to store the embeddings\n",
"\n",
"# We are using vector dimension 1024\n",
"alter_table_query = \"\"\"\n",
"ALTER TABLE VECTOR_GITHUB_ISSUES\n",
"ADD COLUMN embedding VECTOR(FLOAT, 1024);\n",
"\"\"\"\n",
"cur.execute(alter_table_query)\n",
"\n",
"# Step 2: Update the new column with embeddings from Cortex\n",
"# Note: Using a subquery to avoid issues with updating the same table in place\n",
"update_query = \"\"\"\n",
"UPDATE VECTOR_GITHUB_ISSUES\n",
"SET embedding = (\n",
" SELECT SNOWFLAKE.CORTEX.EMBED_TEXT_1024('nv-embed-qa-4', page_content)\n",
");\n",
"\"\"\"\n",
"cur.execute(update_query)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "1uyFarH2Fjvf"
},
"source": [
"This approach demonstrates how to seamlessly integrate data retrieval from an Airbyte source, such as GitHub issues, and efficiently store it in Snowflake for further analysis. By utilizing PyAirbyte for data extraction and Snowflake's capabilities for data warehousing"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/PyAirbyte_Snowflake_Custom_Cache_Demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "R8XtHKK4PujA"
},
"source": [
"# PyAirbyte Custom Snowflake Cache Demo\n",
"\n",
"In this demo, we use PyAirbyte to ingest cryptocurrency data from [CoinAPI.io](https://www.coinapi.io/) into Snowflake.\n",
"\n",
"### Prerequisites\n",
"- CoinAPI [API key](https://www.coinapi.io/get-free-api-key?product_id=market-data-api).\n",
"- A Snowflake account with a database configured to work with PyAirbyte. Find specific details around config in our [documentation](https://docs.airbyte.com/integrations/destinations/snowflake)."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Lyxh2NLuQJUf"
},
"source": [
"## Install PyAirbyte\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "9DEgu1WpQNt-"
},
"outputs": [],
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# Install PyAirbyte\n",
"%pip install --quiet airbyte"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "EwSqhsrmgcQ4"
},
"source": [
"## Define a Snowflake Cache\n",
"\n",
"Define a PyAirbyte Cache for Snowflake."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "jJeZN6phg-1v"
},
"outputs": [],
"source": [
"from airbyte.caches import SnowflakeCache\n",
"from google.colab import userdata\n",
"\n",
"# Define a Snowflake Cache and pass the necessary configuration\n",
"sf_cache = SnowflakeCache(\n",
" account=userdata.get(\"SNOWFLAKE_ACCOUNT\"),\n",
" username=userdata.get(\"SNOWFLAKE_USERNAME\"),\n",
" password=userdata.get(\"SNOWFLAKE_PASSWORD\"),\n",
" warehouse=\"AIRBYTE_DEVELOP_WAREHOUSE\",\n",
" database=\"AIRBYTE_DEVELOP\",\n",
" role=\"AIRBYTE_DEVELOPER\",\n",
" schema_name=\"PYAIRBYTE_DEMO\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "JWWeEbTVEDFz"
},
"source": [
"## Load the Source Data using PyAirbyte\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "1PhfWpS8QVzE"
},
"source": [
"In this section, we establish a connection to CoinAPI.io to access cryptocurrency data via PyAirbyte. The connector is configured with necessary parameters like the API key, environment setting, symbol ID for the specific cryptocurrency index (in this case, COINBASE_SPOT_INDEX_USD), and the data period we are interested in. Check [the docs](https://docs.airbyte.com/integrations/sources/coin-api) for more details.\n",
"\n",
"We select all available streams for the source, which you can consult using the `get_available_streams()` method, or the docs. Then, we proceed to read from the source into Snowflake."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "5BI9hIeUvxXE"
},
"outputs": [],
"source": [
"import airbyte as ab\n",
"\n",
"# Configure and read from the source\n",
"read_result = ab.get_source(\n",
" \"source-coin-api\",\n",
" config={\n",
" \"api_key\": userdata.get(\"API_KEY\"),\n",
" \"environment\": \"production\",\n",
" \"symbol_id\": \"COINBASE_SPOT_INDEX_USD\",\n",
" \"period\": \"1DAY\",\n",
" \"start_date\": \"2023-01-01T00:00:00\"\n",
" },\n",
" streams=[\"ohlcv_historical_data\", \"trades_historical_data\", \"quotes_historical_data\"],\n",
").read(cache=sf_cache)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "uPcaLfSCYrdo"
},
"source": [
"## Read data from Snowflake\n",
"\n",
"Read from the already-written Snowflake table into a pandas Dataframe. After the data is in the cache, you can read it without re-configuring or re-creating the source object."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"id": "z-nOAXR6Y-ui"
},
"outputs": [],
"source": [
"# Read from the cache into a pandas Dataframe:\n",
"ohlcv_df = read_result[\"ohlcv_historical_data\"].to_pandas()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "b6cMu9G2ZVxK"
},
"source": [
"## Run data transformations\n",
"\n",
"- Convert `time_period_start` to datetime for easy handling of dates.\n",
"- Convert numeric columns to numeric types for calculations.\n",
"- Calculate `daily_movement` to analyze daily price changes in the market."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"id": "hmRZZ5PsZYVW"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# Convert 'time_period_start' to datetime format and necessary columns to numeric\n",
"ohlcv_df['time_period_start'] = pd.to_datetime(ohlcv_df['time_period_start'])\n",
"numeric_columns = ['price_open', 'price_high', 'price_low', 'price_close', 'volume_traded', 'trades_count']\n",
"ohlcv_df[numeric_columns] = ohlcv_df[numeric_columns].apply(pd.to_numeric, errors='coerce')\n",
"\n",
"# Calculate daily price movement\n",
"ohlcv_df['daily_movement'] = ohlcv_df['price_close'] - ohlcv_df['price_open']"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "knjH8Cx_zyCa"
},
"source": [
"## Write Dataframe to Snowflake\n",
"\n",
"Get a SQL engine from the Snowflake cache"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"id": "dsn6JSZzfBBw"
},
"outputs": [],
"source": [
"engine = sf_cache.get_sql_engine()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "nhTzao0y0ObW"
},
"source": [
"Now, we can write our transformed Dataframe back to Snowflake in a new table called `daily_movement`."
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"id": "gy1TzBf-f6If"
},
"outputs": [],
"source": [
"from snowflake.connector.pandas_tools import pd_writer\n",
"\n",
"ohlcv_df.to_sql('daily_movement', engine, index=False, method=pd_writer, if_exists='replace')"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/PyAirbyte_as_an_Orchestrator_Demo.ipynb
================================================
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"# Using PyAirbyte as a data orchestrator for hosted Airbyte connections\n",
"This demo showcases how to automate and monitor Airbyte Cloud sync jobs using PyAirbyte. It includes setting up job executions, handling dependencies, sending real-time status updates, and visually representing job details and outcomes on a timeline."
],
"metadata": {
"id": "zSQIhJC1wTt4"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Anj2W5pm0n8E"
},
"outputs": [],
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# Install PyAirbyte\n",
"%pip install --quiet airbyte"
]
},
{
"cell_type": "markdown",
"source": [
"## Job execution and notifications using PyAirbyte\n",
"Here, we initialize an Airbyte Cloud workspace with PyAirbyte to manage data sync jobs. We show how Slack can be used for real-time notifications on job statuses.\n",
"\n",
"The code below does the following:\n",
"\n",
"- Workspace initialization: Sets up the workspace with necessary credentials.\n",
"- Slack notifications: Configures a webhook for sending job statuses to a Slack channel.\n",
"- Job data tracking: Collects and stores execution details like start time, duration, and status for visualization.\n",
"- Run syncs: Executes jobs sequentially based on dependency success and sends detailed outcomes to Slack (or simply print them)."
],
"metadata": {
"id": "QtUXa_7tuXks"
}
},
{
"cell_type": "code",
"source": [
"import requests\n",
"from airbyte import cloud\n",
"from datetime import datetime\n",
"from google.colab import userdata\n",
"\n",
"# Initialize the Airbyte Cloud workspace\n",
"workspace = cloud.CloudWorkspace(\n",
" workspace_id=userdata.get(\"AIRBYTE_WORKSPACE_ID\"),\n",
" api_key=userdata.get(\"AIRBYTE_API_KEY\"),\n",
")\n",
"\n",
"# Slack webhook URL - replace with your actual Slack webhook URL\n",
"slack_webhook_url = \"https://hooks.slack.com/services/XXX/YYY/ZZZ\"\n",
"\n",
"# Initialize job data for visualization\n",
"job_data = []\n",
"\n",
"def send_slack_message(message, webhook_url):\n",
" \"\"\"Sends a message to a Slack channel specified by the webhook URL.\"\"\"\n",
" payload = {'text': message}\n",
" response = requests.post(webhook_url, json=payload)\n",
" return response.status_code\n",
"\n",
"def trigger_sync_and_check(connection_id):\n",
" \"\"\"Triggers a sync job for a specified connection ID and checks the job status.\"\"\"\n",
" start_time = datetime.now()\n",
" connection = workspace.get_connection(connection_id=connection_id)\n",
" sync_result = connection.run_sync()\n",
" end_time = datetime.now()\n",
" duration = (end_time - start_time).total_seconds()\n",
" status = str(sync_result.get_job_status())\n",
"\n",
" # Store job data for visualization\n",
" job_data.append({\n",
" 'connection_id': connection_id,\n",
" 'start_time': start_time,\n",
" 'end_time': end_time,\n",
" 'duration': duration,\n",
" 'status': status\n",
" })\n",
"\n",
" return status, sync_result, start_time, duration\n",
"\n",
"def format_sync_details(sync_result, start_time, duration):\n",
" \"\"\"Format sync details for Slack message, including start time and duration.\"\"\"\n",
" details = (\n",
" f\"Job ID: {sync_result.job_id}\\n\"\n",
" f\"Start Time: {start_time.strftime('%Y-%m-%d %H:%M:%S')}\\n\"\n",
" f\"Duration: {duration:.2f} seconds\\n\"\n",
" f\"Records Synced: {sync_result.records_synced}\\n\"\n",
" f\"Bytes Synced: {sync_result.bytes_synced}\\n\"\n",
" f\"Job URL: {sync_result.job_url}\"\n",
" )\n",
" return details\n",
"\n",
"def run_syncs():\n",
" \"\"\"Handle sequential sync jobs where the second job depends on the success of the first.\"\"\"\n",
" # Connection IDs for the two jobs. Replace with your connection IDs\n",
" first_connection_id = \"df2898ce-b21f-43c0-8c82-bd8c6dd24931\"\n",
" second_connection_id = \"86075252-9004-41fc-b5e3-f61f20388ff5\"\n",
"\n",
" first_status, first_sync_result, first_start, first_duration = trigger_sync_and_check(first_connection_id)\n",
" if \"SUCCEEDED\" in first_status:\n",
" message = f\"First sync succeeded (Connection ID: {first_connection_id}).\\nDetails:\\n{format_sync_details(first_sync_result, first_start, first_duration)}\\nProceeding to second sync.\"\n",
" send_slack_message(message, slack_webhook_url)\n",
" print(message)\n",
"\n",
" second_status, second_sync_result, second_start, second_duration = trigger_sync_and_check(second_connection_id)\n",
" if \"SUCCEEDED\" in second_status:\n",
" message = f\"Second sync succeeded (Connection ID: {second_connection_id}).\\nDetails:\\n{format_sync_details(second_sync_result, second_start, second_duration)}\"\n",
" else:\n",
" message = f\"Second sync failed (Connection ID: {second_connection_id}). Status: {second_status}\\nDetails:\\n{format_sync_details(second_sync_result, second_start, second_duration)}\"\n",
" else:\n",
" message = f\"First sync failed (Connection ID: {first_connection_id}). Status: {first_status}\\nDetails:\\n{format_sync_details(first_sync_result, first_start, first_duration)}\\nSecond sync not initiated.\"\n",
"\n",
" send_slack_message(message, slack_webhook_url)\n",
" print(message)"
],
"metadata": {
"id": "geGeKxan8jc1"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"run_syncs()"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "bOODO9mw850l",
"outputId": "b431b0db-ad1e-4e9a-eb96-823910d66b54"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"First sync succeeded (Connection ID: df2898ce-b21f-43c0-8c82-bd8c6dd24931).\n",
"Details:\n",
"Job ID: 10575770\n",
"Start Time: 2024-04-16 20:08:57\n",
"Duration: 155.78 seconds\n",
"Records Synced: 2100\n",
"Bytes Synced: 766685\n",
"Job URL: https://api.airbyte.com/v1/workspaces/ab2f3ba2-94e6-45b9-9d26-58b3f1d52295/connections/df2898ce-b21f-43c0-8c82-bd8c6dd24931/job-history/10575770\n",
"Proceeding to second sync.\n",
"Second sync succeeded (Connection ID: 86075252-9004-41fc-b5e3-f61f20388ff5).\n",
"Details:\n",
"Job ID: 10575853\n",
"Start Time: 2024-04-16 20:11:32\n",
"Duration: 123.54 seconds\n",
"Records Synced: 1\n",
"Bytes Synced: 214665\n",
"Job URL: https://api.airbyte.com/v1/workspaces/ab2f3ba2-94e6-45b9-9d26-58b3f1d52295/connections/86075252-9004-41fc-b5e3-f61f20388ff5/job-history/10575853\n"
]
}
]
},
{
"cell_type": "markdown",
"source": [
"## Job execution timeline visualization\n",
"With the data collected from the syncs we previously ran using PyAirbyte, we can create a visualization that provides a clear and intuitive representation of a series of job executions over time.\n",
"\n",
"Each job is displayed as a rectangle along a timeline, where the width of each rectangle corresponds to the job's duration. The colors indicate the status of each job—green for successful completions and yellow for failures. This graph is particularly useful for quickly assessing the overall health and efficiency of a workflow. It allows viewers to see at a glance:\n",
"\n",
"- Job Durations: How long each job took to complete.\n",
"- Job Start Times: When each job began relative to the start of the timeline.\n",
"- Job Outcomes: The success or failure of each job."
],
"metadata": {
"id": "rPhUDRYOtPcr"
}
},
{
"cell_type": "code",
"source": [
"import matplotlib.pyplot as plt\n",
"import matplotlib.patches as patches\n",
"from datetime import datetime, timedelta\n",
"\n",
"def get_color(status):\n",
" \"\"\"Returns color based on the job status.\"\"\"\n",
" if \"SUCCEEDED\" in status:\n",
" return 'green' # Green for success\n",
" elif \"FAILED\" in status:\n",
" return 'yellow' # Yellow for failure\n",
" else:\n",
" return 'gray' # Gray for any other status\n",
"\n",
"def draw_job_graph(job_data):\n",
" \"\"\"Draws a graph of job executions where rectangles represent the duration and color indicates status.\"\"\"\n",
" fig, ax = plt.subplots(figsize=(14, 3))\n",
"\n",
" # Start position and time tracking\n",
" start_time = min(job['start_time'] for job in job_data) # Find the earliest start time\n",
" end_time = max(job['start_time'] + timedelta(seconds=job['duration']) for job in job_data) # Find the latest end time\n",
" total_duration = (end_time - start_time).total_seconds()\n",
"\n",
" for job in job_data:\n",
" start_offset = (job['start_time'] - start_time).total_seconds()\n",
" width = job['duration'] # Duration in seconds\n",
" color = get_color(job['status']) # Get color based on job status\n",
" rect = patches.Rectangle((start_offset, 0), width, 1, linewidth=1, edgecolor='black', facecolor=color)\n",
" ax.add_patch(rect)\n",
"\n",
" # Adding text inside the rectangle\n",
" ax.text(start_offset + width / 2, 0.5, f\"{job['connection_id']} ({job['duration']}s)\",\n",
" verticalalignment='center', horizontalalignment='center', color='black', fontweight='bold')\n",
"\n",
" # Set the limits and labels of the x-axis\n",
" ax.set_xlim(0, total_duration)\n",
" ax.set_ylim(-0.5, 1.5)\n",
" ax.set_xlabel(\"Time (seconds from start)\")\n",
" ax.set_title('Job Execution Timeline')\n",
" plt.xticks(range(0, int(total_duration) + 1, 20)) # Set x-ticks to appear every 20 seconds\n",
" plt.yticks([])\n",
" plt.grid(True, which='both', axis='x', linestyle='--', linewidth=0.5)\n",
" plt.show()"
],
"metadata": {
"id": "AsOSYXuMi1mS"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## Visualize sync results"
],
"metadata": {
"id": "eWejfDkBv07x"
}
},
{
"cell_type": "code",
"source": [
"draw_job_graph(job_data)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 333
},
"id": "dGIhuTDxjAPF",
"outputId": "cf09410f-3b6b-4037-d152-2e6b5a7f0499"
},
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
"
"
],
"image/png": "iVBORw0KGgoAAAANSUhEUgAABFUAAAE8CAYAAAABoI7+AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAButUlEQVR4nO3dd5xU1d3H8e/CLkvdXXoRBEREQERRQ7ChBsUSrIklajSxpNiwJZrHGh97bOExGk0EuyYRE2PBBnawISKINFksgEjbpbc9zx9n6u7s7pwfwwzDft6vFy9gdnbmzGdm7r1z5s6dAuecEwAAAAAAAII0yvUAAAAAAAAA8hGTKgAAAAAAAAZMqgAAAAAAABgwqQIAAAAAAGDApAoAAAAAAIABkyoAAAAAAAAGTKoAAAAAAAAYMKkCAAAAAABgwKQKAAAAAACAAZMqAABk0XXXXaeCggItWbIk10PZJkX7bKveeOMNFRQU6I033sj6dZeXl6ugoEBjxoyJnbat9wIAYHvHpAoAAAZjxoxRQUGBPvroo6xe75lnnqmCgoKUf5o2bZrVsVitWbNG1113XU4mJlKpq2ninzPPPDPXQwUAANuYwlwPAAAAhCkuLtbf/va3Gqc3btw4B6MJt2bNGl1//fWSpIMOOijpZ1dddZWuuOKKrI7nV7/6lYYNGxb7/7x583TNNdfo3HPP1QEHHBA7vVevXho8eLDWrl2rJk2aZHWMtclFLwAAEMekCgAAeaawsFCnnXZaroexVRQWFqqwMLubJ0OGDNGQIUNi///oo490zTXXaMiQISk7b0t7BOWiFwAAiOPjPwAAZMj48eN1wAEHqEWLFiorK9MxxxyjGTNmpDzvkiVLdOKJJ6qkpERt27bVRRddpHXr1mVkHM45HXzwwWrfvr0WL14cO33Dhg0aMGCAevXqpdWrV8dOf+yxx7TXXnupWbNmatOmjU4++WR9/fXXNS73/fff15FHHqnWrVurRYsW2n333XXPPffEfn7QQQfV2PNE8h+v6dGjhyR/XJD27dtLkq6//vrYR2uuu+46SamPEbJp0ybdcMMN6tWrl4qLi9WjRw/94Q9/0Pr165PO16NHD/34xz/WO++8ox/84Adq2rSpdtppJz3yyCNB/eqS6pgqBx10kHbbbTdNnTpVQ4cOVfPmzbXzzjvrX//6lyTpzTff1ODBg9WsWTP16dNHr732Wo3L/fbbb/XLX/5SHTt2VHFxsfr376+HHnqo3vGk6lVQUKDzzz9f//73v7XbbrvFLm/cuHEZu14AAOAxqQIAQAa89tprGj58uBYvXqzrrrtOl1xyid577z3tt99+Ki8vr3H+E088UevWrdPNN9+sI488Un/+85917rnnpn19S5YsqfGnsrJSkn9R/dBDD2ndunX69a9/Hfuda6+9VtOnT9fo0aPVokULSdKNN96on//85+rdu7fuvPNOjRw5Uq+//roOPPBArVixIva7r776qg488EB9/vnnuuiii3THHXfo4IMP1vPPPx/UqX379rrvvvskSccdd5weffRRPfroozr++ONr/Z2zzz5b11xzjQYNGqS77rpLQ4cO1c0336yTTz65xnnnzJmjn/zkJzr00EN1xx13qHXr1jrzzDM1ffr0oHGGWr58uX784x9r8ODBuu2221RcXKyTTz5ZTz/9tE4++WQdeeSRuuWWW7R69Wr95Cc/0cqVK2O/+9133+mHP/yhXnvtNZ1//vm65557tPPOO+uss87S3XffbRrPO++8o9/+9rc6+eSTddttt2ndunU64YQTtHTp0q16vQAANDgOAAAEGz16tJPkPvzwQ+ecc3vssYfr0KGDW7p0aew8n376qWvUqJH7+c9/Hjvt2muvdZLc0UcfnXR5v/3tb50k9+mnn9Z5vWeccYaTlPLP8OHDk87717/+1Ulyjz32mJs0aZJr3LixGzlyZOzn5eXlrnHjxu7GG29M+r3PPvvMFRYWxk
7ftGmT69mzp+vevbtbvnx50nmrqqpi/x46dKgbOnRoyjF379499v/vv//eSXLXXnttjfNG+0RNmTLFSXJnn3120vkuu+wyJ8mNHz8+dlr37t2dJPfWW2/FTlu8eLErLi52l156aY3rqs2HH37oJLnRo0fX+NmECROcJDdhwoTYaUOHDnWS3BNPPBE77YsvvnCSXKNGjdykSZNip7/88ss1Lvuss85ynTt3dkuWLEm6rpNPPtmVlpa6NWvWOOecmzdvXo3frd7LOeckuSZNmrg5c+bETvv000+dJDdq1Kjg6wUAALVjTxUAALbQwoULNWXKFJ155plq06ZN7PTdd99dhx56qF588cUav3Peeecl/f+CCy6QpJTnra5p06Z69dVXa/y55ZZbks537rnnavjw4brgggt0+umnq1evXrrppptiPx87dqyqqqp04oknJu3x0qlTJ/Xu3VsTJkyQJH3yySeaN2+eRo4cqbKysqTr2Npf5xvtcckllySdfumll0qSXnjhhaTT+/Xrl3Rw2fbt26tPnz768ssvt+o4W7ZsmbTnTJ8+fVRWVqa+fftq8ODBsdOj/46OxzmnZ555RiNGjJBzLul+GD58uCoqKjR58uTg8QwbNky9evWK/X/33XdXSUnJVr9eAAAaGo5sBgDAFpo/f74k/0K6ur59++rll1/W6tWrYx+5kaTevXsnna9Xr15q1KhRyo8KVde4ceOkb6upy9///nf16tVLs2fP1nvvvadmzZrFfjZ79mw552qMJaqoqEiSNHfuXEnSbrvtltZ1ZtL8+fPVqFEj7bzzzkmnd+rUSWVlZbH2UTvuuGONy2jdurWWL1++VcfZtWvXGhNMpaWl6tatW43TJMXG8/3332vFihV64IEH9MADD6S87MTj4qSrvg5b63oBAGhomFQBAGAbsLX2+HjjjTdiB3T97LPPkr7lpqqqSgUFBXrppZdSfh1zy5Ytg66roKBAzrkap2/evDlw1KkvOx21fa10qnFlUm3XW994qqqqJEmnnXaazjjjjJTn3X333TM2nq19vQAANDRMqgAAsIW6d+8uSZo5c2aNn33xxRdq165d0l4qkt9LpGfPnrH/z5kzR1VVVbFvycmEhQsX6oILLtBhhx2mJk2a6LLLLtPw4cNj4+3Vq5ecc+rZs6d22WWXWi8n+jGSadOm1bmHTOvWrVN+zKb63iQhE0jdu3dXVVWVZs+erb59+8ZO/+6777RixYrYbclX7du3V6tWrbR58+a09z7K5+sFAGB7wzFVAADYQp07d9Yee+yhhx9+OOkbc6ZNm6ZXXnlFRx55ZI3fuffee5P+P2rUKEnSEUcckbFxnXPOOaqqqtLf//53PfDAAyosLNRZZ50V21vh+OOPV+PGjXX99dfX2JPDORf7pphBgwapZ8+euvvuu5NuX/R8Ub169dIXX3yh77//Pnbap59+qnfffTfpd5o3by5JNS4rlWi76t9Gc+edd0qSjjrqqHovY1vWuHFjnXDCCXrmmWc0bdq0Gj9PbLk9XC8AANsb9lQBACADbr/9dh1xxBEaMmSIzjrrLK1du1ajRo1SaWmprrvuuhrnnzdvno4++mgdfvjhmjhxoh577DH97Gc/08CBA+u9rk2bNumxxx5L+bPjjjtOLVq00OjRo/XCCy9ozJgx6tq1qyQ/cXPaaafpvvvu029/+1v16tVL//u//6srr7xS5eXlOvbYY9WqVSvNmzdPzz77rM4991xddtllatSoke677z6NGDFCe+yxh37xi1+oc+fO+uKLLzR9+nS9/PLLkqRf/vKXuvPOOzV8+HCdddZZWrx4se6//371798/9nXPktSsWTP169dPTz/9tHbZZRe1adNGu+22W8pjtgwcOFBnnHGGHnjgAa1YsUJDhw7VBx98oIcffljHHnusDj744HTunm3aLbfcogkTJmjw4ME655xz1K9fPy1btkyTJ0/Wa6+9pmXLlm1X1wsAwPaESRUAAAyie2
hEj10xbNgwjRs3Ttdee62uueYaFRUVaejQobr11luTPuYT9fTTT+uaa67RFVdcocLCQp1//vm6/fbb07ru9evX6/TTT0/5s3nz5mn58uW6+OKLNWLEiKTjZZx66ql65pln9Lvf/U5HHHGEevbsqSuuuEK77LKL7rrrLl1//fWSpG7duumwww7T0UcfHfvd4cOHa8KECbr++ut1xx13qKqqSr169dI555wTO0/fvn31yCOP6JprrtEll1yifv366dFHH9UTTzyhN954I2mcf/vb33TBBRfo4osv1oYNG3TttdfWeiDcv/3tb9ppp500ZswYPfvss+rUqZOuvPJKXXvttWn12tZ17NhRH3zwgf74xz9q7Nix+stf/qK2bduqf//+uvXWW7e76wUAYHtS4Lb2kdsAANgO/fnPf9ZFF12kOXPmJH11LQAAABoOjqkCAIDBhx9+qBYtWuT9gVIBAABgx8d/AAAI8Mwzz+iNN97Q448/rrPPPluFhaxKAQAAGio+/gMAQICePXtq5cqVOu6443T33XfX+KpkAAAANBxMqgAAAAAAABhwTBUAAAAAAAADJlUAAAAAAAAMTEfXq6qq0oIFC9SqVSsVFBRkekwAAAAAAAA54ZzTypUr1aVLFzVqVPe+KKZJlQULFqhbt26mwQEAAAAAAGzrvv76a3Xt2rXO85gmVVq1ahW7gpKSEstFNFgfffSR9t5771wPI+/QLRzNbOhmQzcbuoWjmQ3dbOgWjmY2dLOhWziapaeyslLdunWLzX3UxTSpEv3IT0lJCZMqgVq0aEEzA7qFo5kN3WzoZkO3cDSzoZsN3cLRzIZuNnQLR7Mw6RzuhAPVZlnv3r1zPYS8RLdwNLOhmw3dbOgWjmY2dLOhWzia2dDNhm7haJZ5TKpkWUVFRa6HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JlSxbvHhxroeQl+gWjmY2dLOhmw3dwtHMhm42dAtHMxu62dAtHM0yj0kVAAAAAAAAgwLnnAv9pcrKSpWWlqqiooKD3AAAAAAAgO1GyJwHe6pk2eTJk3M9hLxEt3A0s6GbDd1s6BaOZjZ0s6FbOJrZ0M2GbuFolnlMqmTZxo0bcz2EvES3cDSzoZsN3WzoFo5mNnSzoVs4mtnQzYZu4WiWeUyqZFmbNm1yPYS8RLdwNLOhmw3dbOgWjmY2dLOhWzia2dDNhm7haJZ5TKpkWadOnXI9hLxEt3A0s6GbDd1s6BaOZjZ0s6FbOJrZ0M2GbuFolnlMqmTZ559/nush5CW6haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ezzGNSBQAAAAAAwIBJlSzr1atXroeQl+gWjmY2dLOhmw3dwtHMhm42dAtHMxu62dAtHM0yj0mVLFu9enWuh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czTKPSZUsW7RoUa6HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JFQAAAAAAAIMC55wL/aXKykqVlpaqoqJCJSUlW2Nc262qqio1asRcVii6haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ez9ITMeVAzy6ZOnZrrIeQluoWjmQ3dbOhmQ7dwNLOhmw3dwtHMhm42dAtHs8xjUiXL1q9fn+sh5CW6haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ezzGNSJcvKyspyPYS8RLdwNLOhmw3dbOgWjmY2dLOhWzia2dDNhm7haJZ5TKpkWbdu3XI9hLxEt3A0s6GbDd1s6BaOZjZ0s6FbOJrZ0M2GbuFolnlMqmTZZ599lush5CW6haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ezzGNSBQAAAAAAwIBJlSzr2bNnroeQl+gWjmY2dLOhmw3dwtHMhm42dAtHMxu62dAtHM0yj0mVLNuwYUOuh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C
0czTKPSZUs+/bbb3M9hLxEt3A0s6GbDd1s6BaOZjZ0s6FbOJrZ0M2GbuFolnlMqgAAAAAAABgUOOdc6C9VVlaqtLRUFRUVKikp2Rrj2m5t3LhRRUVFuR5G3qFbOJrZ0M2GbjZ0C0czG7rZ0C0czWzoZkO3cDRLT8icB3uqZNmMGTNyPYS8RLdwNLOhmw3dbOgWjmY2dLOhWzia2dDNhm7haJZ5TKpk2dq1a3M9hLxEt3A0s6GbDd1s6BaOZjZ0s6FbOJrZ0M2GbuFolnlMqmRZq1atcj2EvES3cDSzoZsN3WzoFo5mNnSzoVs4mtnQzYZu4WiWeRxTJcvWrl2rZs2a5XoYeYdu4WhmQzcbutnQLRzNbOhmQ7dwNLOhmw3dwtEsPRxTZRs2derUXA8hL9EtHM1s6GZDNxu6haOZDd1s6BaOZjZ0s6FbOJplHpMqAAAAAAAABkyqZFn37t1zPYS8RLdwNLOhmw3dbOgWjmY2dLOhWzia2dDNhm7haJZ5TKpkWVVVVa6HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JlSz7+uuvcz2EvES3cDSzoZsN3WzoFo5mNnSzoVs4mtnQzYZu4WiWeUyqAAAAAAAAGPCVylm2fv16FRcX53oYeYdu4WhmQzcbutnQLRzNbOhmQ7dwNLOhmw3dwtEsPXyl8jZszpw5uR5CXqJbOJrZ0M2GbjZ0C0czG7rZ0C0czWzoZkO3cDTLPCZVsmzVqlW5HkJeols4mtnQzYZuNnQLRzMbutnQLRzNbOhmQ7dwNMs8JlWyrEWLFrkeQl6iWzia2dDNhm42dAtHMxu62dAtHM1s6GZDt3A0yzyOqZJlGzZsUJMmTXI9jLxDt3A0s6GbDd1s6BaOZjZ0s6FbOJrZ0M2GbuFolh6OqbIN++STT3I9hLxEt3A0s6GbDd1s6BaOZjZ0s6FbOJrZ0M2GbuFolnlMqgAAAAAAABgwqZJlXbt2zfUQ8hLdwtHMhm42dLOhWzia2dDNhm7haGZDNxu6haNZ5jGpkmWNGzfO9RDyEt3C0cyGbjZ0s6FbOJrZ0M2GbuFoZkM3G7qFo1nmMamSZfPnz8/1EPIS3cLRzIZuNnSzoVs4mtnQzYZu4WhmQzcbuoWjWeYxqQIAAAAAAGDAVypn2dq1a9WsWbNcDyPv0C0czWzoZkM3G7qFo5kN3WzoFo5mNnSzoVs4mqWHr1TehpWXl+d6CHmJbuFoZkM3G7rZ0C0czWzoZkO3cDSzoZsN3cLRLPOYVMmyysrKXA8hL9EtHM1s6GZDNxu6haOZDd1s6BaOZjZ0s6FbOJplHpMqWda0adNcDyEv0S0czWzoZkM3G7qFo5kN3WzoFo5mNnSzoVs4mmUex1TJsk2bNqmwsDDXw8g7dAtHMxu62dDNhm7haGZDNxu6haOZDd1s6BaOZukJmfPYoppTpkxRy5Ytt+QiGpzvv/9e7du3z/Uw8g7dwtHMhm42dLOhW7iG2Gz9+vUqLi7eostoiN0ygW7haGZDNxu6haNZelatWpX+mZ1BRUWFk8Qfw58//OEPOR9DPv6hG83otm3/oRvdaLYV/xTQLVd/6EYzum3bf+hGs639p6Kiot75kS3b72eEpM5bdAkNzrtN3pXOzfUo8g/dwtHMhm42dLOhW7gG12y2pAmSjpfUzn4xDa5bhtAtHM1s6GZDt3A0S9NCSf9N76xbNqnSVlKXLbqEBmd5y+U0M6BbOJrZ0M2GbjZ0C9fgmi2J/N1OW3S7G1y3DKFbOJrZ0M2GbuFolqYN6Z+Vb//Jsh+3/3Guh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czTKPSRUAAAAAAACDLfpKZZ0pqUfGx7Rd69SkkxZtWJTrYeQduoWjmQ3dbOhmQ7dwDa7ZVElj5T//vgW7aze4bhlCt3A0s6GbDd3C0SxN5ZLGKK2vVGZPlSw7oPUBuR5CXqJbOJrZ0M2GbjZ0C0czG7
rZ0C0czWzoZkO3cDTLPCZVsqx38965HkJeols4mtnQzYZuNnQLRzMbutnQLRzNbOhmQ7dwNMs8JlWyrGJTRa6HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9jqmRZIzVSlapyPYy8Q7dwNLOhmw3dbOgWrsE1y9AxVRpctwyhWzia2dDNhm7haJamcnFMlW3VFT2vyPUQ8hLdwtHMhm42dLOhWzia2dDNhm7haGZDNxu6haNZ5jGpAgAAAAAAYMCkSpZ9UPFBroeQl+gWjmY2dLOhmw3dwtHMhm42dAtHMxu62dAtHM0yj0mVLPt2/be5HkJeols4mtnQzYZuNnQLRzMbutnQLRzNbOhmQ7dwNMs8JlWy7LgOx+V6CHmJbuFoZkM3G7rZ0C0czWzoZkO3cDSzoZsN3cLRLPOYVAEAAAAAADDgK5WzrGtxV32z/ptcDyPv0C0czWzoZkM3G7qFa3DNMvSVyg2uW4bQLRzNbOhmQ7dwNEtTufhK5W3V3iV753oIeYlu4WhmQzcbutnQLRzNbOhmQ7dwNLOhmw3dwtEs85hUybJ+Lfvlegh5iW7haGZDNxu62dAtHM1s6GZDt3A0s6GbDd3C0SzzmFTJsjWb1+R6CHmJbuFoZkM3G7rZ0C0czWzoZkO3cDSzoZsN3cLRLPM4pgoAAMC2KkPHVAEAAAHKxTFVtlW/7/H7XA8hL9EtHM1s6GZDNxu6haOZDd1s6BaOZjZ0s6FbOJplHpMqWda4oHGuh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czTKPSZUs+2TlJ7keQl6iWzia2dDNhm42dAtHMxu62dAtHM1s6GZDt3A0yzwmVbJs9urZuR5CXqJbOJrZ0M2GbjZ0C0czG7rZ0C0czWzoZkO3cDTLPCZVsuzETifmegh5iW7haGZDNxu62dAtHM1s6GZDt3A0s6GbDd3C0SzzmFQBAAAAAAAwYFIly55a9FSuh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czTKPSZUs69uib66HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JlSwb2GpgroeQl+gWjmY2dLOhmw3dwtHMhm42dAtHMxu62dAtHM0yj0mVLNvoNuZ6CHmJbuFoZkM3G7rZ0C0czWzoZkO3cDSzoZsN3cLRLPMKnHMu9JcqKytVWloqnSmpR8bHBAAAAEmaKmmspHMldcnxWAAAaCjKJY2RKioqVFJSUudZ2VMlyy7pfkmuh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czTKPSZUsa9qoaa6HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JlSybvmp6roeQl+gWjmY2dLOhmw3dwtHMhm42dAtHMxu62dAtHM0yj0mVLJuyckquh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czTKPSZUsO7XzqbkeQl6iWzia2dDNhm42dAtHMxu62dAtHM1s6GZDt3A0yzwmVQAAAAAAAAyYVMmyZ757JtdDyEt0C0czG7rZ0M2GbuFoZkM3G7qFo5kN3WzoFo5mmcekSpZ1b9Y910PIS3QLRzMbutnQzYZu4WhmQzcbuoWjmQ3dbOgWjmaZx6RKlu1dsneuh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czTKPSRUAAAAAAACDAuecC/2lyspKlZaWSmdK6pHxMQEAAECSpkoaK+lcSV1yPBYAABqKckljpIqKCpWUlNR5VvZUybILul2Q6yHkJbqFo5kN3WzoZkO3cDSzoZsN3cLRzIZuNnQLR7PMY1Ily1oVtsr1EPIS3cLRzIZuNnSzoVs4mtnQzYZu4WhmQzcbuoWjWeYxqZJlM1fPzPUQ8hLdwtHMhm42dLOhWzia2dDNhm7haGZDNxu6haNZ5jGpkmXvrXgv10PIS3QLRzMbutnQzY
Zu4WhmQzcbuoWjmQ3dbOgWjmaZx6RKlv1ih1/kegh5iW7haGZDNxu62dAtHM1s6GZDt3A0s6GbDd3C0SzzmFQBAAAAAAAwYFIly577/rlcDyEv0S0czWzoZkM3G7qFo5kN3WzoFo5mNnSzoVs4mmUekypZ1q6oXa6HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JlSzbt2zfXA8hL9EtHM1s6GZDNxu6haOZDd1s6BaOZjZ0s6FbOJplXmGuB4A6jJY0X1J3SdHjCW2U9IKkWZLWRE67Lusj23Y8K+lTJTfKhqWSnpf0raQNObh+SBMkvSmpVNLFxt+Vtq3nz3WRv4dKOriW8yyXdE/k38dI2nMrjynXvpf0F/n7+ULxVsC2Krq+Ok3SzjkeC4Dc+1R+PbtCUpW2bH2Vq229LbFa0n8llUtaJ9u2SjrelfSBpEpJTtIZknpuhevJttq2s1Od/scMXN/fJH2j7aff9mippP+Tfy6dr21uFmMbG8727/by27fsAj6SNCXy746K34Ovym/QLpO0XlIrSbtIOlBSy4TfXyC/kvtW0lpJTSV1lnSA/IIpaqb8gnqxpM2SdpB0iKQdE86zQdIbkr6QX5g3ln+gD5S0r6SCLbqlSba42yeSpsnfnjWSmsvfpoMkdUo43+eSPpTvtD5y2qmSele7vFckzZPv30VS+zTH8Yqk6LeY7SDpnISfvaZ4y83y91vPyBjL0rx8RX73bemme26SKiS1kNRf/kV6cT2/WyXpHUmfyf+uk1QiqW9kHOkuMdK9nHQft9kQGfNNf7nJbwRab7vknxtvS5ouf/uLJLWTdLikrpkbcsxi+efrN5JWyj/32kjaR9KgWn7nA0kvRv7dQtLlCT9bL/84nSY//qaSdpX0I0nNIufZKOkZSQslrZZuanyTv//6yt9/RZHzrZQ0Tn6ZsyJyWn9JP03ztr0hf1/8UPEJlXQvM3HyrLqr5ZdZUnxCoLpuks6qY2yfSPpPHT+PbpzVdvmSbiq7SRoZOF5JWhQ573z5DfYWkfGemHCepfLLlXmSNsk/BveXtFvCeSbKr1NWRM7TPHI5Byp52ViffSNjmaCtPqmyxeuDBqrebiHr9Ew8/hIniFNJnFy+S35ZVN0ASSdE/j1ffrm2QNIq+WV2u8jY+yb8Tl3P2wsktY38O7J+umn5Tf42hq6f0tmOkvx68l1JSyJj7inpUPlleNR6+efWdPkX69H75QAlLxei6lq+12e9pPvl7x9JOkp+XRL1onzrxfLL5hSXf9u023zjKvluJZHzbW3p3rfpWCTpZfnbuVZ++6mNpL1U+3o10Vvyz6UC+e3s0mqXneL5c/tJkefoRvkJmW/ln0dSzW1GyT/WX438u7X88rtY6W/zStL78tu9yyO/u4ukYYo/xisjY1kUuaxC+W3S3SUNUXy9nO523yr55czsyL+L5LvureSutW1npzj99vLb01tG1Ga2/PZTJyVPqLwp/zz+Tv45LEn/o/j2jZT+cucz+fXtcvnnWPPI9e0vqUc94wvZNpDqfg5Hthdv+vYmf9/Wtb24WdIk+QnNpZHb1kZ+ubxL5Dzp3p+Sn2B8W/5xvVH+MbarpCNquW3VtY2cf4b8Y3yfus+ebbznl2W/3OGXW3YBiyN/d5X0G8UXsO/KP0hbyj9RV8g/yR+RX9BJfqXwiPwCYoP8AmqDpDmR01dHzveJpCclfSW/gG0h/0R4WH6hE/WC/AuvZfIL8+LI+F6NXHcGbXG3KZLmKr4yWCm/svu74gsdyS8cv5JvWJfo/fBDSedK+nEaY/hS8QmVVObI3x9t5Ve+FZFxP5rGZSf6j6Q3pIIVBf5+WS2/UHxC8cdCbd6UNF5+74CWkT9L5VeUrweMId3LSedxmy3RMS/Wlt32jZLGyK84lss/3koil7u01t/aMt/Kr/RWy9/nm+UnO56TH391i+U3TGrzpHyPZfKPxw3yE7qPKr5hsUl+j7nGktpLzZ
s29y8K3paf8IhaJf9CQAqfxl8lv/IsUPJEQOhlRjcoE/+kmvRtXe08Heq53BYpLjfxxVb03+1TnC+y9u3QJsWV1Dfe+fLvqs2Qvz86yG/IzEw4z0pJD0XO4yJjWSTpX5ImV7us6OOmtXzbz+WX9xvquf2JdpaffPtW/rG3FW3x+qCBqrdbuuv0TD3+ClXzcZ74ordVijG2q3b+xImHL+WXCxsip6+X9LWkp+VfYFbXJMX1Jy5PIuunNmVtwtdP6W5HTZafnF4k38jJN/u7fENFrusJ+fV49Lm6Qv6FTKoJhPqW7/V5UcnbRdV9Ghlbs9rPcmSjI+ONzpDfVt2l9vNnXH33bTqWyy/Pmsm/ibk58v/n5F8g1ye6ndhf0q8knRz5fx3Pn9hzdJOkqfKP5breDPs+4d/nyXfuovS3ecdLekl+3V0aub4p8tsw0eX/avlJjCaRsTaSn2R4Vf45EpXudt8/5bcnKuWfz43lJySeU/IypLbt7BSnJy3b6lpG1ObDyN+7Vzv988htqOs1QbrLnW/kn7clkTGukd/uf0x1P98SpbstU9dzOLK9WLymuO7tRSfpH/L38+LIuMsil5u4jk/3/pwmv/ybGxlz+8jfs+u91ckGRP7+KPD3soA9VbKsbVEt0+Rr5WeCZ8k/aQ5IcZ7EWdhv5D8qEN2d8AD5BUwL+RXZP+UX2IvlF36dI/9eF/n9o+VfpERn9TfLb0y3UHzhsoOksyP/Hi2/cTBe0s8jp30V+Xtn+V2+N0q6VX5lsCJh3OsVf/erQn4F0UV+BVMUGe8Hkj6W35grkrST/Ds1rf1F1Not0cfyL+hWys80j1D8nYFd5RfI0ZnuSfIv/DZGxjUkcvoBkev9Wv7JX131d9beifwZKOm4Osa2Rn731dbyLzxSveg4S8mz32PlV6pLFX+nQZH/T5Bfya2Vv892kzRcfkE21Z/t0FMP1Ss7v+IXak/Kr8i/kNSvjnFG79N28rvWucjtXaHk+3ST/Mb3VPkmhfIz7sfLN0/3ctJ53KbjC/l3RJdHfmeE/EaQItc9Xn4BXCV/XzWp/ba37dxWS3+11H7bJ8nfDy0lnRlpoMh1b0q4nEWKv/vTTtKRtdy2efIryGXyz5tUy4ZS+b00+spv8KyQdJ/8c+8z+XdCEsf/jPxjrVvk8hMtlt/4l/y7Bz+Qf8yNityu6fIbHk0l/UGxtciFO16oWy65xV/31wmX107S7+Qfv7W9k1Sbz+W7dVXyZEXoZfZW3c/PqAMVtnv6Lqr5QuEv8svSnRRf3lSfdF0g6QH/zyGHDdF/qr8yqmu8Tv5xs0l+4+JoxZcb6xPO97b8xnAT+Y3tEvmNvBnyz5Xd5e+7E5S83Bkv/w7rWvkN7S7y98F4+Y2ilZHzt5Zf7uwX+b3G8uuCafKPuXSfuwZprQ9QQ73d0lmnZ/Lx10o133l/QX5501TxjedER6n2XfM7SDpdUq/I/xdIejAy5s+UPDEr+cdoXR8liayffr3br3XT3JvC1k/pbEdtkm8h+WX3SfIvTP5Pvt3b8uuFLxTf0+0kSX3k9y54SX499EP556lU//K9PtPkJ036Kz5xXd1v5dc50Y/kVDdBeu7NhG8W+b/I39E99+bKbzctiIy3TH7ZOzDNMda1rRdV3327KXIZn8k/tpvKL8sPVXyPmt6SrlT8ResySX+O/PtrpX58Rl2X8O9pkT/d5bcJ6nj+xJ6jxZIulX+O1LanY/X+/xv5+yKlt827SvEX0UPktyEXye/hsER+m2lf+efVlYrvDbFe0p8il5W4rk9nu88l/M5ekTEmblevUN3b2Z+mPr3tJQnLtrqWEamsk5/ckGquz38mv+x6Q7XvKZLucmeYkvfImCw/8bBJ/jVB6zTGms62TH3P4cj24sVHXqxb5t9S+/biNPnXD0WR2xfdw84pPuGWzv2pyPlfiJx/P/k99hIfT1FL5CeEv1F8b56O8ns6Rffy3ln+Ofmd/LK4vje/sohJlSz7cu
2XqX/wnPzKWvIP4FTvMnSWX4itkd9Qaa/4Ozk/SjhfI/mVafTyog/cDvIrjnWR60vc3XSIkl+EJqr+DunmyGV2l3/yzJF0r/wTYJP8Ey96/KNN8jPe0UmE0sjvzo38rEj+BWN0xrG94u+UfiXp15Ja1tEtaoH87GuZ/AuAOZKekp/FLlB80iQqcffbxN3m6tulN/rO2iL5DtHdWuubCf+v/EbSLxXfVbO6IvnJpU/lX9Asi5zeXvF3hJbKL6zXyd+utpF/R/NEVwySmg1o5i+nd2TcmyI/r2tSpbv8RtgS+Y0HJ79Q7CC/EIz6h/wEoOSbNZW/v9bI38fpXk46j9v6rJJ/97N15Hq+kZ/5v0D+efKB/MaT5O+r6DsK1UXGvHTh0i277dGVWGv5ibHvI6fvo/iuihslPS6/UdhI/rH0RIoxrYycvlH+8bFWfsO+up2q/b8scp2LVbPj6/IroxOV/C5CVOLzv6Da35J/rO0eOa1QflL2O+nu1XfHJzcSn19FSn7RHiK6kbZDtdNDL3OG/P0S/bjjIUr9guhl+c9qt5JveojCPoY2W/F30far43zRd/dKpeIBxcnvNtY33u/kn1dRo+SXvZ0lHab4i6vosqCb/ONe8i/cZsg/VhfI309FkdPeiVxOdG+q5orvNfBB5OcF8s+HjZHbOava7dxBfkPsK21V9a4PkFK93dJZp2f68ZdojeIfb95Hqd+h/4f88rtU/oXjgfLPE8m/iEjUWX4dsF6p1yffSrpRfjnWUf7jRokvxiLrpy/Xfhm+fkpnO2qB4sfGi66XS+RfPHypeMPo34WKfxS5r/ykSvTn0e71Ld/rUiG//Isuc2qbVKk+gVFdiVTSqUSViyr9/zvJ9yqOXGZ0HVYov4ypkG+RzqRKfdt6UfXdt0/LL6+jy7QV8o+9byOXVaT4dtPoyHUtS/j96o/d6naQX65vkF+Wtpbflqvn+RN7jjZS6j21ErWJXG50j4ToejK6TZ8o1Tbvl4rvTRR9/HWKXO4y+bb7Jpz/cfntrRXy64Dql5vOdl9B5HfK5SfHvo5cpuQnC/eQb1bbdnYtpyct2+paRqTyTaRDU9X8eFh9j3Up/eVOkfztjU5uRR8H0Y8xpaO+bZl0nsOR7cXy9eX+H2VKvb2YuC07Xv6510J++y/65l4696fkH2trI/9eJelO+ebd5Cfzosv6f8nfv00Vfz04R75xdFIlusfUd/LbGkyqNFyvLX2t5onLFF9R7yc/U75E/h3PRCcrPjNd1yz8BsVnc7sp/oBrJv+i/kkl775VquTPWPaP/Oxb1Zxt3Cy/EdBKfkbSRa4r+qKgsfwKLLoAm5ZwPYcqvgG+WH4Bs1zxCZVj5Z+A6+U36CrlN+YPqaVbos3yEzAdIpf3fOR656jm8VCUcJ3NVPckQ3XRd9ai744PUu0HFI2aLH//HqL6j6dRId89qpP8THl0Y+Ft+UmURvLveERXaAsTfj/inQ2RtyAaya/UK1X/O/oHym9EvKPkDYgOir+4LFd8UuEH8scJie4dEd0DJJ3Lqa62x219Nit+cMzorpQrI5e1j+IvYHeUf6dss6S/quZHcTJ126Mryq/lu7eInPZS5PL3k383ILpr9ynyj9HouxaJPpRf+RbIP+46KL4nQV3KFX9O7pVw+lz5z/QOkn/cp9robh+5nsWKT3iuSPj5ymrnXyxpgbQuuhvcAKX/+dj6RO+jsi24jAL5+6+R/P0wW34Ff7aSN0YK5Z/f6+Rv7+TI+X6r1Hs2pRL9eF9Hxd+1qm65/KSxJP1QGr9ifNh4EzfIP5PfCFwvf5+Pkf9YaGvFn+stEs6f+NxLXBasUvJyp0x+uRPd0Ik+H/aUf2dVketMHIsU3wBdpq2q3vUBUqq3Wzrr9K3x+IuKLu8ayy9fq2si/xxdLf8Ye09+o/qXSv1h9qmKvwua6hgYLeW3Q5ZExl8u/7iv9m71a0tfC18/pbMdldggVaeKan83V/x2pmqZzvK9Nl
XybwJslt97bUsODrCXfJ+/R/5/kuLvwv8j8ndr+futlfx6Md1lRrrbenXdt+WKf+TgDPnjWayUv5++l39cRx8vTsnLxkbyk4fV93qq7hzF9zBJ3Lsg8eMgKZ4/L5e8nP5E/lD5SYXojo7V9/pKlGqbt67H3zLVfI4uUPwwAZLflkmcVE93u+8k+RfPc+VfGEv+ud1J/j4rVu3b2UNTnx5btoUuI6T4dkapMnMsyLqWO+uU/HhqLj8BWpbG5da3bRD4HI41K1fq7cXosn6x/H3SSr7Vm/LLr6MiP6/v/ky8LMkvR9vLbwvNkn/+nie/jok+bn6m+OubVB9jKo1c11be1gjFpEqWndv1XN0076bkExcn/Du6sGsnvyET+tn01fLvbH8XuYwTE362QdK/5R+gh8kfROgj+b1i/ik/C9xZfiHZSH7WsUL+yd5H8RV09Ik6Uf7J0U1+wme1/Erkw8h5jlB84dFYyTPn0Q2SBQmn/TvyJ1Hks8c7vr6jlsxMeFYOUvKTv0PCZe4mv6KVfNvEFe1m+V3QJss/6U9S5g6I+pSSX3AOlb8Px8nP4O+f6peqOVT+3bFl8rehXH4h+XP5ptGePZT8DkGKd93P6XqO37VPqvmuWaqx7iK/kn9P/rEQ/ZjXo/IbAhvlJwASVwj7K/54KEs4PZ3LSVTX4/ZNxScyFBnn0IT/N1X8wJjR4zqsU/zjbpE3y9RH/nHYOHIZE6uNITLmNh3baNkpy+y3PfrOTzP5b6spjlzOl/KThPsp/pwvUvzx2V81J1Wi52un+OO7v+qeVJklv4JzkgYr/jyJPv/byk8G1aaR/MGZX4uMebn843dJ5N/VV9TnSNokne5O16N/ftR3bK3kvXusohsm6U5qVDdAvkH0o3PRSbfN8vfFMZHTD5dfyRfKd3tdfsNwhfyEaDrvoC5UfFf7fes436TIdTSVNKjaOiGd8VYlXNaekdOWy78zGP08fG0TvdWXA1H7yK8PKuT3pIu+m3y24gcu/EB+uTlb/jG0g5KXwVJ8EmadtqqU61HUq95u6azTt8bjT/IvxqLHbYl+NCjRifLrueieff+Rf/HyjfwEdvdq55+s+HbAYUo+eHJPSZcovgfNQvkJgE3yDapNqpxacqru+cs9YeundLejUqmrU23nSXf5vkB+GyjROfIfJ5ovP2naTukf46EWI9qP0ON6PPnE1YpPLO2p+H1cqPj6LTpJEhW6rZfOfZu4Hh+TYvDfKP5iuEj+ozzr5dcF/5FfRraJXFZtPWtTz/On/5z+emePd1L+qollm7e2x9/lfowql9/GiG7jRe+fdLf7Xpd/Ad5P/vH2nfxxit6UXy9W38smDed2PVc3nXhT2DIiaku3MxLVtdyR/GP0Ovk3Mt6Wf96NlZ/0KavjctPZNgh8Dp/b9Vzd9PJNqbcXpfhjtUB+IrO1fNNP5Jdrh8tvT6dzfyY+7g+WX0bOl1+/rJR/bu0p/5yaJv+8bCO/XbaTan4sO0vbGqGYVNmeLJHfPW+5/B4Rpyh59vkzxScx9pRfgOyp+EeNvpRfIBXIvyhIfGHw38jfzeSf1Bvkj+sh+SdSi8ifHeU3Gix7Z0d3EU0UeedzyYIlySvC6guqdKyXf6EwR36sP1PNjxVsiYVKntmPzpRvkF+o3xw5fVPk7wXyu6eerfhHryS/Qmgn/znp8sifL5XebU7YVXFNZWS/4irFd7uL/jzVWCX/QrpKfsFfFjltZ/nZ6ZD7NORy6nvcLlPyfd9OW0dkzDvtvpOWlUUmVSy3vUR+w7Gt4u/udolcRoW27gF4P5Tfu8QpvuKKWq34x41uj5y2OeFnN0r6ifyGf6mSj5a/Uf4z1FLq/oVSt57d/ITPJPmNhf215Rsp0RVnqo9rpaP6WHeWX4atVfLjP3FSskB+Aya6XVvf3l1R0b1USlT7u5hr5TdIJD+JUf3jDemMtyTh59HlV2v550x012zJ34fLlPzOYuK/q+/WXCD/XD1AflIl+o
7t3pFx/Ep+D5tFkT/l8i+goxOHUnzjtK4DK2LblO46fWs9/j5N+HmqScnEdXVj+WXN1Mj/E5+jTn5vvrfll3XHqOYGeVm1/3eW33ivvl6UpCXSw/c+7J8PIeundLajEhuk6lRa7e818uuPRqrZMt3le5Nq441aFPn7JcU/VhQ1Tv7+OVtb3/fasm29smr/r+u+lVJvA6aadCiW35N6knyrt+RfAK5X6p61qef5U7Ek3RVOGurb5q3++GuT8O/qP49qIn+7d5I/NssExV+Ip7Pdt1TxvWYGyG8jdZd/3nwXOZ9hUkVS+suI6rZ0O0NKb7mTqKX8Ntr78m/8fSR/zJXapLNtEPgc/vj1j/0nF1JtL0r+sbpM/rET3dNsB/ltmKrIuKuU3v2Z6nGfeH+tiPx9nPw2aLn8smC24sexOirh/NvotgaTKln20pLqj3Qlfx3vDPkH2hLFd6NKR7n8Z0TXym8QHaeaxxxInNFbIL97euKeItEXQKvkV97RdwPKFf+c827yGwsbFX9xGL2M6OfsEy9rB/kXepvlV0bRDYzv5Z+kiS9m9pCfSJD8k/wrxZ4wbc9pq69W1vFB/cWRy2yv5M8QRm9DpfwL9+8i5zlVW/aRglQuTnFa9J3rzYpv4EQ5xTsulR//LvIL5OhnhaOin1+Nfk63XH6iJvpxokXyk1I7yy/YJY19Y6x/h3224hM50Q2UVGOV4o+RRYrfv9F3jRLv06h35WerCxT/6uDmaV6OlN7j9jjVfWCudfKz5L0if0evu4P8Ar5E/v6fKf/42qzURxuP/N4Xc7+Ir9Qtt30n+XcrlkYus1jx50gb+fs3+rjcKH8/76z4R0ISdZDfcFmi+OM71fmc/Ltn78lvTByjmkexj6pS6omd6s/ptpGxV0UuO7oSi35++Ev5vpHPAj//7fPxg+k5+cfclk6qtFHtG8PpeEd+mVUW+f9cxScYo6etkt/w2kvxFXTiMiR6vkrFD149TMlflbgi4Xd+qNqPt/Ch/IZbY/l3hVRtnZDOeHeIjHO94o+rFYpvCEc/E76z/DtYX0fGXqL4x0yby99va+SfC/0V3xpIfG5ElzuL5DesosdAWinpjsh1LlX88+DR+6n659IzLOV6FPWqs1vIOj1Tj78op/ik5C5K3iZSZAzfKH5w5SolLwfLIn9vkn8n9bPIGE9U6o/hfSC/t2d0ObxI8d3fyxLOVy7paWnF2hXh66d0tqO6KP7C6HP5FyWVin87UOIemJMjt2+2/IuOaMvE80n1L997KvkgqqnOV93mWk6vw/sV79c8sYV83xXyLQbJv7jcLL8c6aC6Dy4r1b+tl859m/jYO0D+uBuKjONLxV/ATo1cVvQF4RLFP24Q7VFfz+rqef4saVX9M5VG6Wzz9lR8e/Nz+T3UFil+G6OPqxmRy4h2WaXkZURUOtt91V+H9JV/nqyodr5AY6eN9dt49S0jUolOJlm3M0KWO3sofhtTrWslv51RKd8mOtGSzrZBqsuKSnwOR7YXX37v5bq3F3eSX25F9zArU/x+L5Lf0yzxdWpd92dP+WWei5xvZyW//oyuN76KXEb0INBvy+8JM1/JsrStEYpJlSxr3rh5zRPbyi/Uv5B/4nwh/4AJ+Wzfo4q/aK9Q8i6N0Y927CL/gjt6UMy2in+WsFjxFUuF/MFQy+QXuNEFbGvFd+ttIT8bOV9+QfKt/EoiumEV3V1+N/mZ2IXye8R8ELnM5fLf3hHddfBj+ZnUSfJPworI5R0jqVMt3RI1lj9ORmvFP7sXnWSQYgfTlOSf1P9M+N3Ej5NMiox3U8LP/yO/AOkn//GcEKlWuNHP2e6g+G6i8+Q/ktNE8a81jbYsUfwAawfIPz7Wye/S2i7y7+byn2XvIt98mvTNv7/xC6To/bej4vdxbfrJz2bPl3S34hMGUvw+7SHfbJZ8q+mKfxbynMhY0rkcKb3HbX0ay8+2t1b8trZMuJ595R9bX8l/ZnqzUu8yGBnzqi9Xbd
ltP0B+Zb5WfpfeYsV3w4w+zgbIv8OzMjL2Nko+bknUPvK7LG+U/7aY0lrON03xFyXFkbElbtOeI9/numq/Fz1GUwv5XXujPon8aaP4iwPJTxhEJ/Lmy+/e2VxSK2n6iunxiZddFN9NtVL+MS/FP3I2W/HjDFyU4vZEdZdvvKDa6ele5ofy756VKv75ekX+HZ3A3Si/bHotcns3KP6RsXaKT55EN/ylmo+fSfIbccWq+ZGYqMSPNwxQbNf3pGVbOuMtkj8S/svyL7S+km/g5B/30evfX/5xsUb+GFXNFH/s/Eh+7b9S/jHw38htT/y4XJOE2z5dfllSIv9YiT4vipT8jQXRd2wTP5q4FdS7PkBKdXZLd52eycdf1EzFn1up9lJZLf/RyBfkH6drEsbVU/7FoOSXlZ9F/t1Efnsn8iaDWin+lbbT5ffoaym/nFqi+B4giR/Tja6fouuCMQk/q2/9lM52VKF8i+flX7jeLb/e2BAZV3Qsu8o/p76SfxOijeK9Big+SXBdtTHUtnxPpfrk0HLFl6dHKX6QdckveysVvw/WJJz3BEldpaaNajky6KHy21/LIr/TWr7VHkrvWFz1beulc9/2VPxNmKfkt4UbKX4A1jMil/+x/EczSuWX7dHLktL/pqLq6nn+dNuvmxZuisxERJtG13GLEk77hZLf/a8unW3eVvLPt3fknzuzFF+2t5HfS1Hy25xPR87fXP6xF91GTuyQznZfJ8UPrvu2/ON+leLbDsauhWsK01tGpNJN/v5fJ/+4bJPws2fkJznXJpz2l8jfh8rf5nSXOy/K3++tlXzg40ZK/iapZfLdEj+in862QbrP4XS2FxU5f/Tji/dFbkv0evdX/Jsv07k/S+WPk/W+4t8kGN0ubq/4oS/GyreO3s7opH7i3vwbEk7fytsaoZhUybKhrYfq3RXv1vzB0Yp/X/c6+ZXuTKX+GrVUEveCqL4rYnTB0l7+4KbvyL9IWSq/su2m+AJWkdN6yC+Q18svuHeNnCdxm+xkxSeBKuVXdjvIvwMbnfUslF9BvSn/ZKuUfxLvpPij7yj5Fy+fRMZUKL8hslNkHKqjW1SXyHW+Jb+A6iH/NXvRianESZLqbwQk7la3VjU/h7iq2t9bQ6l84wWKj6+1fIMDFf8YSVv5Bd4E+YmY6H24U8JlHefPVzq9VBXLKvzP+8kf46KeA1fpx5Hr+Ezxib2O8u8oJW5UnajkrxXeKP9iu3ng5aTzuK1PS/mNsegxGLtGrj86Q/4D+fvuI/nnVv/IOKs/nCJjbjeznZZ8v8R+21vLfz72Nfnn7xr559iBih8/pUh+V9z/Kr7hc7L8RnyiVvK7m78kv6JtIv/Vzf+odr7Ex/caxSdBrHaQf4diufwGWWf5BoMSztNV/nn2vf/TqKiRNnfc7B9riS+MNqvmc2qD0tvVtp/8hNg38o+H6G736V5mdIJrsfxjoEzx5V30ed8icr65it+f7eSfj/up/m8ZWie/cSwl7+1S3VTFlyEJfZKWbemMV/J7UhXLT+ZElwF95N/VijYqkX8cvi7/7utK+Q2gfRVfPjeVn4T9Vv7xVRX5vR6RsZRFztddfmL8u8jYmshvqA5V/JvJNiu+d13iBuJWUO/6ACnV2y2ddbqUucdfVHQDfwfF1vlJ2keu80v5F75Ofk+EAfIvKFKt51cq+UVJ4kcZfhAZ/6KE8XeRX0Yn7o0YXT85ha+f0t2O2lv++fSe/LK0UPF3qKMvmhvJry8myC8flkVuz8DImLNthWp+5Cq6PI68Gz6w1UBNSzoqa0R/+eVO4nZomdL/9pP6tvXSvW+jj/XPImMvln+c7az4Xi67yq9Tliu+7RrdJrBOqkh1Pn+GdRumD+ZFZt+rr+MS13vV93yuLt1t3h/Jb0N9JP+4aip/Hw1TfBtqp8jPlij+JRM7yD//fpBwWels9zWWfx3ytvz6YkXkenrIr297y+SQPodo2pBp9S8jUokel2+W/OuuxI8fVa
rm/RD9f3TiIN3lzh7ye+5VyN9/LeW3o/ZT/V9ike62QTrS3V5spvg3ls6NjLuzfM/o4z/k/hwuv00b3ZO7lfwE38GKvx7cM3I5y+Wfey0jl5H40ag58vdvR21T3/wjSQXOORf6S5WVlSotLfUhe2R8TNu1P/T8AwfYM6BbOJrZ0M1mq3X7p/y7j0co9pGZ7cl283j7Qv5d367a6sde2G6apWuq/Dt45yr9F58pNLhuGUK3cDSzoZvNFnebLf9xqc7yxw9rAPL2sfaU/PbGjxXfm2prKpc0RqqoqFBJSV27hzGpknXNGzXXmqotfRu54aFbOJrZ0M1mq3X7Xn532zJJF2jLvupzG7TdPN4ekt+V/XTV/nXSGbLdNEtXhiZVGly3DKFbOJrZ0M0mI93+Lr8nyRmKf+R+O5aXj7Wlkv5Pfg+gC1T78esyqVxpT6psZ5un275TOlf/Llmkg27haGZDN5ut1q29pGvlj5OyHa6xtpvH2y/lj+mwlSdUpO2oWZbRzYZu4WhmQzebjHQ7S34d1gAmVKQ8fay1ld8eHKnsTKgE2g43UbdtHZt0rP9MqIFu4WhmQzcbutnQLRzNbOhmQ7dwNLOhmw3dwtEs85hUybJv1n1T/5lQA93C0cyGbjZ0s6FbOJrZ0M2GbuFoZkM3G7qFo1nmcUyVLCstLFXFJuuXoTdcdAtHMxu62dDNhm7hGlyzDB1TpcF1yxC6haOZDd1s6BaOZmkqF8dU2Vad1+28XA8hL9EtHM1s6GZDNxu6haOZDd1s6BaOZjZ0s6FbOJplHpMqAAAAAAAABkyqZNnry17P9RDyEt3C0cyGbjZ0s6FbOJrZ0M2GbuFoZkM3G7qFo1nmMakCAAAAAABgwKRKlv2ozY9yPYS8RLdwNLOhmw3dbOgWjmY2dLOhWzia2dDNhm7haJZ5TKoAAAAAAAAY8JXKWcZXWNnQLRzNbOhmQzcbuoVrcM34SuWcols4mtnQzYZu4WiWpnLxlcrbqmPaH5PrIeQluoWjmQ3dbOhmQ7dwNLOhmw3dwtHMhm42dAtHs8xjUiXLujbtmush5CW6haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ezzGNSJcu+2/BdroeQl+gWjmY2dLOhmw3dwtHMhm42dAtHMxu62dAtHM0yj2OqZFnzRs21pmpNroeRd+gWjmY2dLOhmw3dwjW4Zhk6pkqD65YhdAtHMxu62dAtHM3SVC6OqbKtGtl9ZK6HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JFQAAAAAAAAMmVbLszeVv5noIeYlu4WhmQzcbutnQLRzNbOhmQ7dwNLOhmw3dwtEs85hUybI1m/n8mgXdwtHMhm42dLOhWzia2dDNhm7haGZDNxu6haNZ5jGpkmVHtDsi10PIS3QLRzMbutnQzYZu4WhmQzcbuoWjmQ3dbOgWjmaZx6QKAAAAAACAAV+pnGXtitppycYluR5G3qFbOJrZ0M2GbjZ0C9fgmmXoK5UbXLcMoVs4mtnQzYZu4WiWpnLxlcrbqmFth+V6CHmJbuFoZkM3G7rZ0C0czWzoZkO3cDSzoZsN3cLRLPOYVMmynZrtlOsh5CW6haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ezzGNSJcuWblya6yHkJbqFo5kN3WzoZkO3cDSzoZsN3cLRzIZuNnQLR7PM45gqWVZUUKSNbmOuh5F36BaOZjZ0s6GbDd3CNbhmGTqmSoPrliF0C0czG7rZ0C0czdJULo6psq26vMfluR5CXqJbOJrZ0M2GbjZ0C0czG7rZ0C0czWzoZkO3cDTLPCZVAAAAAAAADJhUybL3VryX6yHkJbqFo5kN3WzoZkO3cDSzoZsN3cLRzIZuNnQLR7PMY1Ily/hOcBu6haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ezzGNSJcuObn90roeQl+gWjmY2dLOhmw3dwtHMhm42dAtHMxu62d
AtHM0yj0kVAAAAAAAAA75SOcs6N+mshRsW5noYeYdu4WhmQzcbutnQLVyDa5ahr1RucN0yhG7haGZDNxu6haNZmsrFVypvq/Yt2zfXQ8hLdAtHMxu62dDNhm7haGZDNxu6haOZDd1s6BaOZpnHpEqW9WnRJ9dDyEt0C0czG7rZ0M2GbuFoZkM3G7qFo5kN3WzoFo5mmcekSpat3LQy10PIS3QLRzMbutnQzYZu4WhmQzcbuoWjmQ3dbOgWjmaZxzFVAAAAtlUZOqYKAAAIUC6OqbKt+kPPP+R6CHmJbuFoZkM3G7rZ0C0czWzoZkO3cDSzoZsN3cLRLPOYVAEAAAAAADBgUiXLPqr8KNdDyEt0C0czG7rZ0M2GbuFoZkM3G7qFo5kN3WzoFo5mmcekSpbNXzs/10PIS3QLRzMbutnQzYZu4WhmQzcbuoWjmQ3dbOgWjmaZx6RKlp3Q8YRcDyEv0S0czWzoZkM3G7qFo5kN3WzoFo5mNnSzoVs4mmUekyoAAAAAAAAGfKVylnVv2l3z17HLVSi6haOZDd1s6GZDt3ANrlmGvlK5wXXLELqFo5kN3WzoFo5maSoXX6m8rdqj1R65HkJeols4mtnQzYZuNnQLRzMbutnQLRzNbOhmQ7dwNMs8JlWyrH/L/rkeQl6iWzia2dDNhm42dAtHMxu62dAtHM1s6GZDt3A0yzwmVbJsXdW6XA8hL9EtHM1s6GZDNxu6haOZDd1s6BaOZjZ0s6FbOJplHsdUAQAA2FZl6JgqAAAgQLk4psq26vIel+d6CHmJbuFoZkM3G7rZ0C0czWzoZkO3cDSzoZsN3cLRLPOYVMmyooKiXA8hL9EtHM1s6GZDNxu6haOZDd1s6BaOZjZ0s6FbOJplXuEW/fZSSU0yM5CG4tOiT6UFuR5F/qFbOJrZ0M2GbjZ0C9fgmi2P/L1kyy6mwXXLELqFo5kN3WzoFo5maVqa/lm3bFLlv1v02w3SjJ1mSF/mehT5h27haGZDNxu62dAtXINsViB/XJUt0CC7ZQDdwtHMhm42dAtHs8zbokmVN998Uy1btszUWBqE77//Xu3bt8/1MPIO3cLRzIZuNnSzoVu4hths/fr1Ki4u3qLLaIjdMoFu4WhmQzcbuoWjWXpWrVqloUOHpnXeLZpU2WOPPeo9Ei6Svf/++xo0aFCuh5F36BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czdJTWVmZ9nk5UG2W7bLLLrkeQl6iWzia2dDNhm42dAtHMxu62dAtHM1s6GZDt3A0yzwmVbJs+fLl9Z8JNdAtHM1s6GZDNxu6haOZDd1s6BaOZjZ0s6FbOJplHpMqWfb999/negh5iW7haGZDNxu62dAtHM1s6GZDt3A0s6GbDd3C0SzzmFTJsoKCglwPIS/RLRzNbOhmQzcbuoWjmQ3dbOgWjmY2dLOhWziaZV6Bc86F/lJlZaVKS0tVUVHBgWoBAAAAAMB2I2TOgz1Vsuzjjz/O9RDyEt3C0cyGbjZ0s6FbOJrZ0M2GbuFoZkM3G7qFo1nmMamSZZs2bcr1EPIS3cLRzIZuNnSzoVs4mtnQzYZu4WhmQzcbuoWjWeYxqZJlbdq0yfUQ8hLdwtHMhm42dLOhWzia2dDNhm7haGZDNxu6haNZ5jGpkmUdO3bM9RDyEt3C0cyGbjZ0s6FbOJrZ0M2GbuFoZkM3G7qFo1nmMamSZTNmzMj1EPIS3cLRzIZuNnSzoVs4mtnQzYZu4WhmQzcbuoWjWeYxqQIAAAAAAGDApEqW7bzzzrkeQl6iWzia2dDNhm42dAtHMxu62dAtHM1s6GZDt3A0yzwmVbJs1apVuR5CXqJbOJrZ0M2GbjZ0C0czG7rZ0C0czWzoZkO3cDTLPCZVsmzRokW5HkJeols4mtnQzYZuNnQLRzMbutnQLRzNbOhmQ7dwNMs8JlUAAAAAAAAMCpxzLvSXKisrVVpaqoqKCpWUlGyNcW23nHMqKCjI9TDyDt
3C0cyGbjZ0s6FbOJrZ0M2GbuFoZkM3G7qFo1l6QuY82FMly6ZMmZLrIeQluoWjmQ3dbOhmQ7dwNLOhmw3dwtHMhm42dAtHs8xjUiXLNmzYkOsh5CW6haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ezzGNSJcvKyspyPYS8RLdwNLOhmw3dbOgWjmY2dLOhWzia2dDNhm7haJZ5TKpkWdeuXXM9hLxEt3A0s6GbDd1s6BaOZjZ0s6FbOJrZ0M2GbuFolnlMqmTZtGnTcj2EvES3cDSzoZsN3WzoFo5mNnSzoVs4mtnQzYZu4WiWeUyqAAAAAAAAGDCpkmU77bRTroeQl+gWjmY2dLOhmw3dwtHMhm42dAtHMxu62dAtHM0yj0mVLFu3bl2uh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czTKPSZUsW7BgQa6HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JFQAAAAAAAIMC55wL/aXKykqVlpaqoqJCJSUlW2Nc261NmzapsLAw18PIO3QLRzMbutnQzYZu4WhmQzcbuoWjmQ3dbOgWjmbpCZnzYE+VLJs+fXquh5CX6BaOZjZ0s6GbDd3C0cyGbjZ0C0czG7rZ0C0czTKPSZUs48BANnQLRzMbutnQzYZu4WhmQzcbuoWjmQ3dbOgWjmaZx6RKlvFxKRu6haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ezzOOYKlm2du1aNWvWLNfDyDt0C0czG7rZ0M2GbuFoZkM3G7qFo5kN3WzoFo5m6eGYKtuwqVOn5noIeYlu4WhmQzcbutnQLRzNbOhmQ7dwNLOhmw3dwtEs85hUAQAAAAAAMGBSJcu6d++e6yHkJbqFo5kN3WzoZkO3cDSzoZsN3cLRzIZuNnQLR7PMY1IlyzZv3pzrIeQluoWjmQ3dbOhmQ7dwNLOhmw3dwtHMhm42dAtHs8xjUiXLvvnmm1wPIS/RLRzNbOhmQzcbuoWjmQ3dbOgWjmY2dLOhWziaZR6TKgAAAAAAAAZ8pXKWbdiwQU2aNMn1MPIO3cLRzIZuNnSzoVs4mtnQzYZu4WhmQzcbuoWjWXr4SuVt2KxZs3I9hLxEt3A0s6GbDd1s6BaOZjZ0s6FbOJrZ0M2GbuFolnlMqmTZ6tWrcz2EvES3cDSzoZsN3WzoFo5mNnSzoVs4mtnQzYZu4WiWeUyqZFnLli1zPYS8RLdwNLOhmw3dbOgWjmY2dLOhWzia2dDNhm7haJZ5HFMly9avX6/i4uJcDyPv0C0czWzoZkM3G7qFo5kN3WzoFo5mNnSzoVs4mqWHY6psw6ZMmZLrIeQluoWjmQ3dbOhmQ7dwNLOhmw3dwtHMhm42dAtHs8xjUgUAAAAAAMCASZUs69atW66HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JlSxr1IjkFnQLRzMbutnQzYZu4WhmQzcbuoWjmQ3dbOgWjmaZR9Esmz9/fq6HkJfoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9JFQAAAAAAAAO+UjnL1q5dq2bNmuV6GHmHbuFoZkM3G7rZ0C0czWzoZkO3cDSzoZsN3cLRLD18pfI2bN68ebkeQl6iWzia2dDNhm42dAtHMxu62dAtHM1s6GZDt3A0yzwmVbJs5cqVuR5CXqJbOJrZ0M2GbjZ0C0czG7rZ0C0czWzoZkO3cDTLPCZVsoxdrWzoFo5mNnSzoZsN3cLRzIZuNnQLRzMbutnQLRzNMo9jqmTZxo0bVVRUlOth5B26haOZDd1s6GZDt3A0s6GbDd3C0cyGbjZ0C0ez9HBMlW3Y5MmTcz2EvES3cDSzoZsN3WzoFo5mNnSzoVs4mtnQzYZu4WiWeYWWX4ru3FJZWZnRwTQEq1evppsB3cLRzIZuNnSzoVs4mtnQzYZu4WhmQzcbuoWjWXqijdL5YI9pUmXp0qWSpG7dul
l+HQAAAAAAYJu2cuVKlZaW1nke06RKmzZtJElfffVVvVeAuMrKSnXr1k1ff/01x6IJQLdwNLOhmw3dbOgWjmY2dLOhWzia2dDNhm7haJY+55xWrlypLl261Hte06RKo0b+UCylpaXcGQYlJSV0M6BbOJrZ0M2GbjZ0C0czG7rZ0C0czWzoZkO3cDRLT7o7kHCgWgAAAAAAAAMmVQAAAAAAAAxMkyrFxcW69tprVVxcnOnxbNfoZkO3cDSzoZsN3WzoFo5mNnSzoVs4mtnQzYZu4Wi2dRS4dL4jCAAAAAAAAEn4+A8AAAAAAIABkyoAAAAAAAAGTKoAAAAAAAAYMKkCAAAAAABgYJpUuffee9WjRw81bdpUgwcP1gcffJDpceWtm2++Wfvss49atWqlDh066Nhjj9XMmTOTzrNu3Tqdd955atu2rVq2bKkTTjhB3333XY5GvG265ZZbVFBQoJEjR8ZOo1tq3377rU477TS1bdtWzZo104ABA/TRRx/Ffu6c0zXXXKPOnTurWbNmGjZsmGbPnp3DEefW5s2bdfXVV6tnz55q1qyZevXqpRtuuEGJx+ymmfTWW29pxIgR6tKliwoKCvTvf/876efpNFq2bJlOPfVUlZSUqKysTGeddZZWrVqVxVuRfXV127hxo37/+99rwIABatGihbp06aKf//znWrBgQdJl0K3m4y3Rr3/9axUUFOjuu+9OOr2hdUun2YwZM3T00UertLRULVq00D777KOvvvoq9vOGuF6tr9uqVat0/vnnq2vXrmrWrJn69eun+++/P+k8Da1bprZtv/rqKx111FFq3ry5OnTooMsvv1ybNm3K5k3Jqvq6LVu2TBdccIH69OmjZs2aaccdd9SFF16oioqKpMuhW83HW5RzTkcccUTK53JD6pZus4kTJ+qQQw5RixYtVFJSogMPPFBr166N/byhrUczKXhS5emnn9Yll1yia6+9VpMnT9bAgQM1fPhwLV68eGuML++8+eabOu+88zRp0iS9+uqr2rhxow477DCtXr06dp6LL75Y//3vf/XPf/5Tb775phYsWKDjjz8+h6Petnz44Yf661//qt133z3pdLrVtHz5cu23334qKirSSy+9pM8//1x33HGHWrduHTvPbbfdpj//+c+6//779f7776tFixYaPny41q1bl8OR586tt96q++67T//3f/+nGTNm6NZbb9Vtt92mUaNGxc5DM2n16tUaOHCg7r333pQ/T6fRqaeequnTp+vVV1/V888/r7feekvnnntutm5CTtTVbc2aNZo8ebKuvvpqTZ48WWPHjtXMmTN19NFHJ52PbrV79tlnNWnSJHXp0qXGzxpat/qazZ07V/vvv7923XVXvfHGG5o6daquvvpqNW3aNHaehrhera/bJZdconHjxumxxx7TjBkzNHLkSJ1//vl67rnnYudpaN0ysW27efNmHXXUUdqwYYPee+89PfzwwxozZoyuueaaXNykrKiv24IFC7RgwQL96U9/0rRp0zRmzBiNGzdOZ511Vuwy6Jb68RZ19913q6CgoMbpDa1bOs0mTpyoww8/XIcddpg++OADffjhhzr//PPVqFF8OqChrUczygX6wQ9+4M4777zY/zdv3uy6dOnibr755tCLahAWL17sJLk333zTOefcihUrXFFRkfvnP/8ZO8+MGTOcJDdx4sRcDXObsXLlSte7d2/36quvuqFDh7qLLrrIOUe32vz+9793+++/f60/r6qqcp06dXK333577LQVK1a44uJi9+STT2ZjiNuco446yv3yl79MOu344493p556qnOOZqlIcs8++2zs/+k0+vzzz50k9+GHH8bO89JLL7mCggL37bffZm3suVS9WyoffPCBk+Tmz5/vnKObc7V3++abb9wOO+zgpk2b5rp37+7uuuuu2M8aerdUzU466SR32mmn1fo7rFdTd+vfv7/74x//mHTaoEGD3P/8z/845+jmnG3b9sUXX3SNGj
VyixYtip3nvvvucyUlJW79+vXZvQE5Ur1bKv/4xz9ckyZN3MaNG51zdHOu9m6ffPKJ22GHHdzChQtrPJcberdUzQYPHuyuuuqqWn+noa9Ht1TQniobNmzQxx9/rGHDhsVOa9SokYYNG6aJEyduydzOdiu6C1+bNm0kSR9//LE2btyY1HDXXXfVjjvuSENJ5513no466qikPhLdavPcc89p77331k9/+lN16NBBe+65px588MHYz+fNm6dFixYldSstLdXgwYMbbLd9991Xr7/+umbNmiVJ+vTTT/XOO+/oiCOOkESzdKTTaOLEiSorK9Pee+8dO8+wYcPUqFEjvf/++1kf87aqoqJCBQUFKisrk0S32lRVVen000/X5Zdfrv79+9f4Od2SVVVV6YUXXtAuu+yi4cOHq0OHDho8eHDS7vGsV1Pbd9999dxzz+nbb7+Vc04TJkzQrFmzdNhhh0mim2Tbtp04caIGDBigjh07xs4zfPhwVVZWavr06Vkcfe5U71bbeUpKSlRYWCiJblLqbmvWrNHPfvYz3XvvverUqVON32no3ao3W7x4sd5//3116NBB++67rzp27KihQ4fqnXfeif0O69EtEzSpsmTJEm3evDnpASpJHTt21KJFizI6sO1BVVWVRo4cqf3220+77babJGnRokVq0qRJbAM6iobSU089pcmTJ+vmm2+u8TO6pfbll1/qvvvuU+/evfXyyy/rN7/5jS688EI9/PDDkhRrw3M27oorrtDJJ5+sXXfdVUVFRdpzzz01cuRInXrqqZJolo50Gi1atEgdOnRI+nlhYaHatGlDx4h169bp97//vU455RSVlJRIolttbr31VhUWFurCCy9M+XO6JVu8eLFWrVqlW265RYcffrheeeUVHXfccTr++OP15ptvSmK9WptRo0apX79+6tq1q5o0aaLDDz9c9957rw488EBJdLNu2y5atCjlOiP6s+1dqm7VLVmyRDfccEPSxy3olrrbxRdfrH333VfHHHNMyt9ryN1SNfvyyy8lSdddd53OOeccjRs3ToMGDdKPfvSj2PHwWI9umcJcD2B7dt5552natGlJs4BI7euvv9ZFF12kV199Nenz3qhbVVWV9t57b910002SpD333FPTpk3T/fffrzPOOCPHo9s2/eMf/9Djjz+uJ554Qv3799eUKVM0cuRIdenShWbImo0bN+rEE0+Uc0733XdfroezTfv44491zz33aPLkySk/O4+aqqqqJEnHHHOMLr74YknSHnvsoffee0/333+/hg4dmsvhbdNGjRqlSZMm6bnnnlP37t311ltv6bzzzlOXLl1q7EXbELFta1Nft8rKSh111FHq16+frrvuuuwObhuWqttzzz2n8ePH65NPPsnhyLZdqZpF1wm/+tWv9Itf/EKSf83w+uuv66GHHkr5hjbCBO2p0q5dOzVu3LjG0by/++67lLteNWTnn3++nn/+eU2YMEFdu3aNnd6pUydt2LBBK1asSDp/Q2/48ccfa/HixRo0aJAKCwtVWFioN998U3/+859VWFiojh070i2Fzp07q1+/fkmn9e3bN/btDtE2PGfjLr/88tjeKgMGDNDpp5+uiy++OLZCoVn90mnUqVOnGgcw37Rpk5YtW9bgO0YnVObPn69XX301tpeKRLdU3n77bS1evFg77rhjbP0wf/58XXrpperRo4ckulXXrl07FRYW1rt+YL2abO3atfrDH/6gO++8UyNGjNDuu++u888/XyeddJL+9Kc/SWrY3bZk27ZTp04p1xnRn23PausWtXLlSh1++OFq1aqVnn32WRUVFcV+Rrea3caPH6+5c+eqrKwstk6QpBNOOEEHHXSQpIbbrbZmnTt3lqR61wmsR+2CJlWaNGmivfbaS6+//nrstKqqKr3++usaMmRIxgeXj5xzOv/88/Xss89q/Pjx6tmzZ9LP99prLxUVFSU1nDlzpr766qsG3fBHP/qRPvvsM02ZMiX2Z+
+999app54a+zfdatpvv/1qfGXarFmz1L17d0lSz5491alTp6RulZWVev/99xtstzVr1iQd6VySGjduHJvFp1n90mk0ZMgQrVixQh9//HHsPOPHj1dVVZUGDx6c9TFvK6ITKrNnz9Zrr72mtm3bJv2cbjWdfvrpmjp1atL6oUuXLrr88sv18ssvS6JbdU2aNNE+++xT5/qB7ZGaNm7cqI0bN9a5jmiI3TKxbTtkyBB99tlnSS/aopPK1V/obS/q6yb5dedhhx2mJk2a6Lnnnquxtzbdana74ooraqwTJOmuu+7S6NGjJTW8bvU169Gjh7p06VLnOoH16BYKPbLtU0895YqLi92YMWPc559/7s4991xXVlaWdHTlhuw3v/mNKy0tdW+88YZbuHBh7M+aNWti5/n1r3/tdtxxRzd+/Hj30UcfuSFDhrghQ4bkcNTbpsRv/3GObql88MEHrrCw0N14441u9uzZ7vHHH3fNmzd3jz32WOw8t9xyiysrK3P/+c9/3NSpU90xxxzjevbs6dauXZvDkefOGWec4XbYYQf3/PPPu3nz5rmxY8e6du3aud/97nex89DMfxPXJ5984j755BMnyd15553uk08+iX1LTTqNDj/8cLfnnnu6999/373zzjuud+/e7pRTTsnVTcqKurpt2LDBHX300a5r165uypQpSeuIxG8joFvNx1t11b/9x7mG162+ZmPHjnVFRUXugQcecLNnz3ajRo1yjRs3dm+//XbsMhrierW+bkOHDnX9+/d3EyZMcF9++aUbPXq0a9q0qfvLX/4Su4yG1i0T27abNm1yu+22mzvssMPclClT3Lhx41z79u3dlVdemYublBX1dauoqHCDBw92AwYMcHPmzEk6z6ZNm5xzdKvt8Vadqn37T0Prlk6zu+66y5WUlLh//vOfbvbs2e6qq65yTZs2dXPmzImdp6GtRzMpeFLFOedGjRrldtxxR9ekSRP3gx/8wE2aNCnT48pbklL+GT16dOw8a9eudb/97W9d69atXfPmzd1xxx3nFi5cmLtBb6OqT6rQLbX//ve/brfddnPFxcVu1113dQ888EDSz6uqqtzVV1/tOnbs6IqLi92PfvQjN3PmzByNNvcqKyvdRRdd5HbccUfXtGlTt9NOO7n/+Z//SXpRSzPnJkyYkHJZdsYZZzjn0mu0dOlSd8opp7iWLVu6kpIS94tf/MKtXLkyB7cme+rqNm/evFrXERMmTIhdBt1qPt6qSzWp0tC6pdPs73//u9t5551d06ZN3cCBA92///3vpMtoiOvV+rotXLjQnXnmma5Lly6uadOmrk+fPu6OO+5wVVVVsctoaN0ytW1bXl7ujjjiCNesWTPXrl07d+mll8a+Onh7VF+32h6Lkty8efNil0O3mo+3VL9T/evRG1K3dJvdfPPNrmvXrq558+ZuyJAhSZPszjW89WgmFTjnnHUvFwAAAAAAgIYq6JgqAAAAAAAA8JhUAQAAAAAAMGBSBQAAAAAAwIBJFQAAAAAAAAMmVQAAAAAAAAyYVAEAAAAAADBgUgUAAAAAAMCASRUAAAAAAAADJlUAAMiCM888U8cee2zOrv/000/XTTfdlLPrz4QxY8aorKzM/PuLFi3SoYceqhYtWmzR5WxPrrjiCl1wwQW5HgYAAHmLSRUAALZQQUFBnX+uu+463XPPPRozZkxOxvfpp5/qxRdf1IUXXpiT699W3HXXXVq4cKGmTJmiWbNm5Xo49SooKNC///3vjFxWeXm5CgoKNGXKlKTTL7vsMj388MP68ssvM3I9AAA0NIW5HgAAAPlu4cKFsX8//fTTuuaaazRz5szYaS1btlTLli1zMTRJ0qhRo/TTn/40p2PYFsydO1d77bWXevfuXet5Nm7cqKKioiyOauvbsGFDrT9r166dhg8frvvuu0+33357FkcFAMD2gT1VAADYQp06dYr9KS0tVUFBQdJpLVu2rPHxn4MOOkgXXHCBRo4cqdatW6
tjx4568MEHtXr1av3iF79Qq1attPPOO+ull15Kuq5p06bpiCOOUMuWLdWxY0edfvrpWrJkSa1j27x5s/71r39pxIgRSaf/5S9/Ue/evdW0aVN17NhRP/nJT2I/q6qq0s0336yePXuqWbNmGjhwoP71r38l/f706dP14x//WCUlJWrVqpUOOOAAzZ07N/b7f/zjH9W1a1cVFxdrjz320Lhx42K/G91rYuzYsTr44IPVvHlzDRw4UBMnTky6jjFjxmjHHXdU8+bNddxxx2np0qVJP//000918MEHq1WrViopKdFee+2ljz76KGWHHj166JlnntEjjzyigoICnXnmmZL83iD33Xefjj76aLVo0UI33nijJOm+++5Tr1691KRJE/Xp00ePPvpo0uUVFBTor3/9q3784x+refPm6tu3ryZOnKg5c+booIMOUosWLbTvvvvGmqSyYcMGnX/++ercubOaNm2q7t276+abb46NV5KOO+44FRQUxP4/d+5cHXPMMerYsaNatmypffbZR6+99lqN23rDDTfo5z//uUpKSnTuueeqZ8+ekqQ999xTBQUFOuigg2LnHzFihJ566qlaxwkAAOrgAABAxowePdqVlpbWOP2MM85wxxxzTOz/Q4cOda1atXI33HCDmzVrlrvhhhtc48aN3RFHHOEeeOABN2vWLPeb3/zGtW3b1q1evdo559zy5ctd+/bt3ZVXXulmzJjhJk+e7A499FB38MEH1zqeyZMnO0lu0aJFsdM+/PBD17hxY/fEE0+48vJyN3nyZHfPPffEfv6///u/btddd3Xjxo1zc+fOdaNHj3bFxcXujTfecM45980337g2bdq4448/3n344Ydu5syZ7qGHHnJffPGFc865O++805WUlLgnn3zSffHFF+53v/udKyoqcrNmzXLOOTdv3jwnye26667u+eefdzNnznQ/+clPXPfu3d3GjRudc85NmjTJNWrUyN16661u5syZ7p577nFlZWVJbfv37+9OO+00N2PGDDdr1iz3j3/8w02ZMiVlh8WLF7vDDz/cnXjiiW7hwoVuxYoVzjnnJLkOHTq4hx56yM2dO9fNnz/fjR071hUVFbl7773XzZw5091xxx2ucePGbvz48bHLk+R22GEH9/TTT7uZM2e6Y4891vXo0cMdcsghbty4ce7zzz93P/zhD93hhx9e631z++23u27durm33nrLlZeXu7fffts98cQTsfFKcqNHj3YLFy50ixcvds45N2XKFHf//fe7zz77zM2aNctdddVVrmnTpm7+/Pmxy+3evbsrKSlxf/rTn9ycOXPcnDlz3AcffOAkuddee80tXLjQLV26NHb+GTNmOElu3rx5tY4VAACkxqQKAAAZFDKpsv/++8f+v2nTJteiRQt3+umnx05buHChk+QmTpzonHPuhhtucIcddljS5X799ddOkps5c2bK8Tz77LOucePGrqqqKnbaM88840pKSlxlZWWN869bt841b97cvffee0mnn3XWWe6UU05xzjl35ZVXup49e7oNGzakvM4uXbq4G2+8Mem0ffbZx/32t791zsUnVf72t7/Ffj59+nQnyc2YMcM559wpp5zijjzyyKTLOOmkk5LatmrVyo0ZMyblGFI55phj3BlnnJF0miQ3cuTIpNP23Xdfd8455ySd9tOf/jRpPJLcVVddFfv/xIkTnST397//PXbak08+6Zo2bVrreC644AJ3yCGHJN031cf27LPP1nezXP/+/d2oUaNi/+/evbs79thjk84Tbf7JJ5/U+P2KigonKTZpBgAA0sfHfwAAyJHdd9899u/GjRurbdu2GjBgQOy0jh07SpIWL14syX/cZcKECbFjtLRs2VK77rqrJNX6MZO1a9equLhYBQUFsdMOPfRQde/eXTvttJNOP/10Pf7441qzZo0kac6cOVqzZo0OPfTQpOt55JFHYtcxZcoUHXDAASmPPVJZWakFCxZov/32Szp9v/3204wZM2
q9/Z07d066rTNmzNDgwYOTzj9kyJCk/19yySU6++yzNWzYMN1yyy11ftSmLnvvvXfS/2fMmBE8/uh9Vf3+W7dunSorK1Ne75lnnqkpU6aoT58+uvDCC/XKK6/UO9ZVq1bpsssuU9++fVVWVqaWLVtqxowZ+uqrr+q8TXVp1qyZJMUeAwAAIH1MqgAAkCPVJyUKCgqSTotOhFRVVUnyL6hHjBihKVOmJP2ZPXu2DjzwwJTX0a5dO61ZsybpYKWtWrXS5MmT9eSTT6pz58665pprNHDgQK1YsUKrVq2SJL3wwgtJ1/H555/HjqsSfRGeydtf/bam47rrrtP06dN11FFHafz48erXr5+effbZ4HG0aNEi+Hek1OMPuU2DBg3SvHnzdMMNN2jt2rU68cQTk45tk8pll12mZ599VjfddJPefvttTZkyRQMGDKhxMNqQ27Rs2TJJUvv27dP+HQAA4DGpAgBAnhg0aJCmT5+uHj16aOedd076U9uL6D322EOS9PnnnyedXlhYqGHDhum2227T1KlTVV5eHpuYKC4u1ldffVXjOrp16ybJ76Hx9ttva+PGjTWur6SkRF26dNG7776bdPq7776rfv36pX1b+/btq/fffz/ptEmTJtU43y677KKLL75Yr7zyio4//niNHj067euo67q3dPzpKikp0UknnaQHH3xQTz/9tJ555pnYJEdRUZE2b95cYxxnnnmmjjvuOA0YMECdOnVSeXl5vdfTpEkTSapxeZI/+HFRUZH69++/5TcIAIAGhq9UBgAgT5x33nl68MEHdcopp+h3v/ud2rRpozlz5uipp57S3/72NzVu3LjG77Rv316DBg3SO++8E5tgef755/Xll1/qwAMPVOvWrfXiiy+qqqpKffr0UatWrXTZZZfp4osvVlVVlfbff39VVFTo3XffVUlJic444wydf/75GjVqlE4++WRdeeWVKi0t1aRJk/SDH/xAffr00eWXX65rr71WvXr10h577KHRo0drypQpevzxx9O+rRdeeKH2228//elPf9Ixxxyjl19+OekbhNauXavLL79cP/nJT9SzZ0998803+vDDD3XCCSdscefLL79cJ554ovbcc08NGzZM//3vfzV27Nga37Kzpe6880517txZe+65pxo1aqR//vOf6tSpk8rKyiT5b/F5/fXXtd9++6m4uFitW7dW7969NXbsWI0YMUIFBQW6+uqr09q7p0OHDmrWrJnGjRunrl27qmnTpiotLZUkvf322zrggAMytgcSAAANCXuqAACQJ6J7gGzevFmHHXaYBgwYoJEjR6qsrEyNGtW+Sj/77LOTJjTKyso0duxYHXLIIerbt6/uv/9+Pfnkk7E9FW644QZdffXVuvnmm9W3b18dfvjheuGFF2Jfy9u2bVuNHz9eq1at0tChQ7XXXnvpwQcfjH305cILL9Qll1yiSy+9VAMGDNC4ceP03HPPqXfv3mnf1h/+8Id68MEHdc8992jgwIF65ZVXdNVVV8V+3rhxYy1dulQ///nPtcsuu+jEE0/UEUccoeuvvz6oaSrHHnus7rnnHv3pT39S//799de//lWjR49O+hriTGjVqpVuu+027b333tpnn31UXl6uF198MXZf3nHHHXr11VfVrVs37bnnnpL8REzr1q217777asSIERo+fLgGDRpU73UVFhbqz3/+s/7617+qS5cuOuaYY2I/e+qpp3TOOedk9LYBANBQFDjnXK4HAQAAtp61a9eqT58+evrpp2sc7BUN20svvaRLL71UU6dOVWEhOzADABCKPVUAANjONWvWTI888oiWLFmS66FgG7N69WqNHj2aCRUAAIzYUwUAAAAAAMCAPVUAAAAAAAAMmFQBAAAAAAAwYFIFAAAAAADAgEkVAAAAAAAAAyZVAAAAAAAADJhUAQAAAAAAMGBSBQAAAAAAwIBJFQAAAAAAAAMmVQAAAAAAAAz+Hz+n5iG5q5IzAAAAAElFTkSuQmCC\n
"
},
"metadata": {}
}
]
}
]
}
================================================
FILE: pyairbyte_notebooks/RAG_using_github_pyairbyte_chroma.ipynb
================================================
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"# **End-to-End RAG Tutorial Using Github, PyAirbyte, Chroma, and LangChain**\n",
"This notebook illustrates the complete setup of a Retrieval-Augmented Generation (RAG) pipeline. \n",
"We extract data from a GitHub repository using PyAirbyte, store the data in a Chroma vector store, and use LangChain to perform RAG on the stored data. \n",
"## **Prerequisites**\n",
"**1) OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://www.openai.com/).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://beta.openai.com/docs/quickstart).\n",
"\n",
"**2) Github Personal Access Token**:\n",
" - **Create a Github Account**: Sign up for an account on [Github](https://www.github.com/).\n",
"  - **Generate a Personal Access Token**: Click on your profile icon -> Settings -> Developer Settings and generate a new personal access token. For detailed instructions, refer to the [Github documentation](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens).\n"
],
"metadata": {
"id": "lv1fce-k_Ul5"
}
},
{
"cell_type": "markdown",
"source": [
"# **Installing Dependencies**\n",
"First things first! \n",
"Let's get the dependencies installed before anything else."
],
"metadata": {
"id": "mbc7jQsW-kGs"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true,
"id": "5FFGW2iOhOqT"
},
"outputs": [],
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# First, we need to install the necessary libraries.\n",
"!pip3 install airbyte langchain langchain-openai chromadb python-dotenv langchainhub langchain-chroma"
]
},
{
"cell_type": "markdown",
"source": [
"## **Source Setup : Github with PyAirbyte**\n",
"The code you see below configures an Airbyte source to pull out data from a github repository.\n",
"\n",
"You can also customize the configuration according to your own needs.\n",
"See [this](https://docs.airbyte.com/integrations/sources/github#reference)\n",
"\n",
"Note that here we only fetch data from the Commits stream. \n",
"To know about all the available streams go [here](https://docs.airbyte.com/integrations/sources/github#supported-streams)\n"
],
"metadata": {
"id": "aBfzg42A_mhd"
}
},
{
"cell_type": "code",
"source": [
"import airbyte as ab\n",
"\n",
"source = ab.get_source(\n",
" \"source-github\",\n",
" config={\n",
" \"credentials\": {\n",
" \"personal_access_token\": \"your_personal_access_token\"\n",
" },\n",
" \"repositories\": [\"your_github_username/your_repository_ID\"]\n",
" }\n",
")\n",
"source.check()\n",
"\n",
"source.get_available_streams()\n",
"source.select_streams([\"commits\"])\n",
"cache = ab.get_default_cache()\n",
"result = source.read(cache=cache)\n",
"\n",
"commits_details = [doc for doc in result[\"commits\"].to_documents()]\n",
"\n",
"print(str(commits_details[0]))"
],
"metadata": {
"collapsed": true,
"id": "vMiRDXs1ii3S"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# **Split Documents into Chunks**\n",
"Large documents are split into smaller chunks to make them easier to handle. This also helps in improving the efficiency of the retrieval process, as smaller chunks can be more relevant to specific queries.\n",
" \n",
"Here we set each chunk size to 512 characters and adjacent chunks will overlap by 50 characters to ensure continuity of context\n",
" \n",
"Then the loop converts all metadata to string format to ensure consistent processing later in the pipeline."
],
"metadata": {
"id": "f4yYrQ9lLVRJ"
}
},
{
"cell_type": "code",
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)\n",
"chunked_docs = splitter.split_documents(commits_details)\n",
"\n",
"for doc in chunked_docs:\n",
" for md in doc.metadata:\n",
" doc.metadata[md] = str(doc.metadata[md])"
],
"metadata": {
"collapsed": true,
"id": "_shmtR8Zl6d5"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"import os\n",
"\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"YOUR_OPENAI_API_KEY\")\n",
"embeddings = OpenAIEmbeddings()"
],
"metadata": {
"collapsed": true,
"id": "CNgHPzULmB0j"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# **Setting up Chroma**\n",
"Create and configure a Chroma vector store to store the document embeddings. \n",
"First we initialize Chroma Client\n",
" \n",
"Then we create Chroma Vector Store from Documents\n",
" \n",
"Finally, we use the embedding function when accessing the collection.\n",
" \n",
"Since there is currently a waitlist for Chroma's hosted offering, we initialize the Chroma client in persistent mode (local file)."
],
"metadata": {
"id": "TwHTBa43M7fM"
}
},
{
"cell_type": "code",
"source": [
"import chromadb\n",
"from langchain_chroma import Chroma\n",
"from chromadb.utils import embedding_functions\n",
"\n",
"persist_directory = 'chroma_db'\n",
"client = chromadb.PersistentClient(path=persist_directory)\n",
"collection_name = \"github_commits\"\n",
"\n",
"openai_lc_client = Chroma.from_documents(\n",
" documents=chunked_docs,\n",
" embedding=embeddings,\n",
" persist_directory=persist_directory,\n",
" collection_name=collection_name\n",
")\n",
"\n",
"openai_ef = embedding_functions.OpenAIEmbeddingFunction(\n",
" api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
" model_name=\"text-embedding-ada-002\"\n",
")\n",
"collection = client.get_collection(name=collection_name, embedding_function=openai_ef)\n"
],
"metadata": {
"collapsed": true,
"id": "PzB1caQTmbuS"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# **Querying Chroma and RAG Pipeline**\n",
"Finally we use LangChain to retrieve documents from Chroma and generate responses using an OpenAI chat model."
],
"metadata": {
"id": "MoCApKIWNpBd"
}
},
{
"cell_type": "code",
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain import hub\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"# Initialize the LLM\n",
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
"# Set up the retriever from the Chroma vector store\n",
"retriever = openai_lc_client.as_retriever()\n",
"\n",
"# Set up the prompt\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"\n",
"# Function to format documents\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"# Create the RAG chain\n",
"rag_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"\n",
"print(\"Langchain RAG pipeline set up successfully.\")\n",
"\n",
"# Example query\n",
"response = rag_chain.invoke(\"Which are the commit messages of latest commits?\")\n",
"print(response)\n"
],
"metadata": {
"id": "uDPPklkxod8-"
},
"execution_count": null,
"outputs": []
}
]
}
================================================
FILE: pyairbyte_notebooks/README.md
================================================
# PyAirbyte Notebooks Quickstart
This quickstart will help you get started quickly with PyAirbyte.
## Quickstart Quicklinks
To jump right in, click on any of the below links to open a new Colab notebook from the provided quickstart template.
1. [Basic Features Demo](https://colab.research.google.com/github/airbytehq/quickstarts/blob/master/pyairbyte_notebooks/PyAirbyte_Basic_Features_Demo.ipynb) - Walks through the basic functionality of PyAirbyte and how to use it in a Notebook environment.
2. [CoinAPI Demo](https://colab.research.google.com/github/airbytehq/quickstarts/blob/master/pyairbyte_notebooks/PyAirbyte_CoinAPI_Demo.ipynb) - Shows how to provide credentials securely and perform basic graphing.
3. [GitHub Demo](https://colab.research.google.com/github/airbytehq/quickstarts/blob/master/pyairbyte_notebooks/PyAirbyte_Github_Incremental_Demo.ipynb) - Demonstrates how to get data from GitHub, how to analyze GitHub metrics and how to refresh your cache data incrementally.
4. [GA4 Demo](https://colab.research.google.com/github/airbytehq/quickstarts/blob/master/pyairbyte_notebooks/PyAirbyte_GA4_Demo.ipynb) - A Google Analytics demo showing how to analyze page views and other GA metrics.
## How to use these Quickstarts
There are three ways to use the quickstart resources here.
### Google Colab
Google Colab ("Colab" for short) is a hosted version of Jupyter. Because it is hosted by google, most people can access Colab using their existing Google account. To use these notebooks in Colab, click on the "Open in Colab" badge at the top of the file.
Note:
- Colab doesn't come with virtual environment ("venv") support by default. For this reason, our demo workbooks start by installing venv support as a prerequisite, using `apt-get`.
### Self-Hosted Jupyter
If you have a self-hosted Jupyter instance, you can load any of the notebooks in this directory.
### VS Code Notebooks
You can run these notebooks natively in VS Code if you have the Python extension installed. You can also use GitHub Codespaces to open a new VS Code devcontainer in your web browser.
## Securely Managed Secrets
You can pass secrets to PyAirbyte by using the `get_secret()` function. This call will retrieve a named secret from any of the following locations:
1. Google Colab Secrets
2. Environment Variables
3. Masked User Input (via [getpass](https://docs.python.org/3/library/getpass.html))
If you are using Google Colab, we suggest using the Colab secrets feature. For other environments, you can set your secret values in environment variables.
Note: The `get_secret()` implementation in PyAirbyte is provided for your convenience as a secure runtime-agnostic default secrets interface. You are always free to use any secrets management platform you are most familiar with.
**Warning:** Please do not enter your secrets directly into notebook cells. Doing so can cause the secret to be leaked into logs and/or in the "previous versions" look-back of the notebook. Instead, simply call `get_secret()` without pre-initializing the value. If the value is not already initialized, you will be prompted for secret values interactively and all values will be masked during input to avoid accidental leakage. This is performed using the Python standard library [`getpass`](https://docs.python.org/3/library/getpass.html).
================================================
FILE: pyairbyte_notebooks/rag_using_gdrive_pyairbyte_pinecone.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"id": "8f104c9d-933a-45fd-9d2c-57e6d56a56b1",
"metadata": {},
"source": [
"# End-to-End RAG Tutorial Using GoogleDrive, PyAirbyte, Pinecone, and LangChain\n",
"\n",
"This notebook demonstrates an end-to-end Retrieval-Augmented Generation (RAG) pipeline. We will extract data from Google Drive using PyAirbyte, store it in a Pinecone vector store, and then use LangChain to perform RAG on the stored data. This workflow showcases how to integrate these tools to build a scalable RAG system.\n",
"\n",
"## Prerequisites\n",
"\n",
"1. **GoogleDrive**:\n",
" - Follow the instructions in the [GoogleDrive Source Connector Documentation](https://docs.airbyte.com/integrations/sources/google-drive) to set up your google-drive and obtain the service account json\n",
"\n",
"2. **Pinecone Account**:\n",
" - **Create a Pinecone Account**: Sign up for an account on the [Pinecone website](https://www.pinecone.io/).\n",
" - **Obtain Pinecone API Key**: Generate a new API key from your Pinecone project settings. For detailed instructions, refer to the [Pinecone documentation](https://docs.pinecone.io/docs/quickstart).\n",
"\n",
"3. **OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://www.openai.com/).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://beta.openai.com/docs/quickstart).\n"
]
},
{
"cell_type": "markdown",
"id": "8e1d3a0b-d446-43b0-b9dd-691fd837e5eb",
"metadata": {},
"source": [
"## Install PyAirbyte and other dependencies"
]
},
{
"cell_type": "raw",
"id": "ffebf71e-2b9a-4a1a-b448-9812b49f120c",
"metadata": {},
"source": [
"!pip3 install airbyte openai langchain pinecone-client langchain-openai langchain-pinecone langchainhub "
]
},
{
"cell_type": "markdown",
"id": "7b51a295-8c8e-483d-a931-24ec2590dffa",
"metadata": {},
"source": [
"## Setup Source GoogleDrive with PyAirbyte\n",
"\n",
"The provided code configures an Airbyte source to extract data from a Google Drive folder containing a CSV file named NFLX.csv.\n",
"\n",
"To configure according to your requirements, you can refer to [this reference](https://docs.airbyte.com/integrations/sources/google-drive#reference).\n",
"\n",
"Note: The credentials are retrieved securely using the get_secret() method. This will automatically locate a matching Google Colab secret or environment variable, ensuring they are not hard-coded into the notebook. Make sure to add your key to the Secrets section on the left.\n"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "6ddd2f96-8680-4a19-a02b-32fd0b703912",
"metadata": {},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
"Enter the value for secret 'service_json': ········\n"
]
},
{
"data": {
"text/html": [
"
Connection check succeeded for `source-google-drive`.\n",
"
\n"
],
"text/plain": [
"Connection check succeeded for `source-google-drive`.\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import airbyte as ab\n",
"\n",
"service_json = ab.get_secret('service_json')\n",
"\n",
"source = ab.get_source(\n",
" \"source-google-drive\",\n",
" install_if_missing=True,\n",
" config={\n",
" \"folder_url\": \"https://drive.google.com/drive/folders/1txtyBv_mfXYjn0R_-oxV3Vg5QOi-6XaI\",\n",
" \"credentials\": {\n",
" \"auth_type\": \"Service\",\n",
" \"service_account_info\": f\"\"\"{service_json}\"\"\",\n",
" },\n",
" \"streams\": [{\n",
" \"name\": \"NFLX\",\n",
" \"globs\": [\"**/*.csv\"],\n",
" \"format\": {\n",
" \"filetype\": \"csv\"\n",
" },\n",
" \"validation_policy\": \"Emit Record\",\n",
" \"days_to_sync_if_history_is_full\": 3\n",
" }]\n",
" \n",
" },\n",
" \n",
")\n",
"\n",
"# Verify the config and creds by running `check`:\n",
"source.check()"
]
},
{
"cell_type": "markdown",
"id": "b12930b8-ff69-46e9-9daf-89b5ed49b68c",
"metadata": {},
"source": [
"This is a basic process of fetching data from a Google Drive CSV source using Airbyte and converting it into a list of document objects, making it suitable for further processing or analysis."
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "a8abe757-b263-4574-b220-da715c4d827e",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"## Read Progress\n",
"\n",
"Started reading at 11:20:50.\n",
"\n",
"Read **0** records over **2 seconds** (0.0 records / second).\n",
"\n",
"Finished reading at 11:20:53.\n",
"\n",
"Started finalizing streams at 11:20:53.\n",
"\n",
"Finalized **0** batches over 0 seconds.\n",
"\n",
"\n",
"\n",
"\n",
"------------------------------------------------\n"
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"
Completed `source-google-drive` read operation at 16:50:53.\n",
"
\n"
],
"text/plain": [
"Completed `source-google-drive` read operation at \u001b[1;92m16:50:53\u001b[0m.\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"```yaml\n",
"_ab_source_file_last_modified: '2024-06-04T04:00:24.000000Z'\n",
"_ab_source_file_url: NFLX.csv\n",
"_airbyte_extracted_at: 2024-06-07 11:20:25.946000\n",
"_airbyte_meta: {}\n",
"_airbyte_raw_id: 01HZS6VC3573GHEKRHMW1KA41T\n",
"adj_close: '254.259995'\n",
"close: '254.259995'\n",
"date: '2018-02-05'\n",
"high: '267.899994'\n",
"low: '250.029999'\n",
"open: '262.000000'\n",
"volume: '11896100'\n",
"```\n",
"\n"
]
}
],
"source": [
"# This code reads data from a Google Drive CSV source and converts it into a list of document objects.\n",
"\n",
"source.select_all_streams() # Select all streams from the Google Drive source\n",
"read_result = source.read() # Read the data from the selected streams\n",
"documents_list = []\n",
"\n",
"# Convert the read data into document objects and add them to the list\n",
"for key, value in read_result.items():\n",
" docs = value.to_documents()\n",
" for doc in docs:\n",
" documents_list.append(doc)\n",
"\n",
"# Print the Single row of the csv \n",
"print(str(documents_list[0]))"
]
},
{
"cell_type": "markdown",
"id": "a04c48f5-1801-47b9-9244-1cf1b560c291",
"metadata": {},
"source": [
"## Use Langchain to build a RAG pipeline."
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "ebef2365-3570-43ad-b05d-a9e3ef86aef1",
"metadata": {},
"outputs": [],
"source": [
"# RecursiveCharacterTextSplitter from the langchain library splits documents into smaller chunks of 512 characters with a 50-character overlap.\n",
"# It then converts all metadata values in each chunk to strings and prints the total number of created document chunks.\n"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "81cada6b-c1bb-4cbf-9a40-30f2547ed15c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Created 1009 document chunks.\n"
]
}
],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.vectorstores.utils import filter_complex_metadata\n",
"\n",
"\n",
"\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)\n",
"chunked_docs = splitter.split_documents(documents_list)\n",
"chunked_docs = filter_complex_metadata(chunked_docs)\n",
"print(f\"Created {len(chunked_docs)} document chunks.\")\n",
"\n",
"for doc in chunked_docs:\n",
" for md in doc.metadata:\n",
" doc.metadata[md] = str(doc.metadata[md])"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "50b756b3-745b-49ff-a57a-d1d6371243fe",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"from langchain_community.embeddings import HuggingFaceEmbeddings\n",
"## Create Embeddings using HuggingFace sentence-transformers/all-mpnet-base-v2 model\n",
"embeddings=HuggingFaceEmbeddings()"
]
},
{
"cell_type": "markdown",
"id": "33c36b1f-f03b-4f78-bbe1-9e9035af5a1d",
"metadata": {},
"source": [
"## Setting up Pinecone\n",
"Pinecone is a managed vector database service designed for fast similarity search and real-time recommendation systems, offering scalability, efficiency, and ease of integration."
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "a61ce285-9d58-43e0-808b-786803364d8f",
"metadata": {},
"outputs": [],
"source": [
"from pinecone import Pinecone, ServerlessSpec\n",
"import os\n",
"os.environ['PINECONE_API_KEY'] = ab.get_secret(\"PINECONE_API_KEY\")\n",
"index_name = \"gdriveairbyteindex\"\n",
"\n",
"pc = Pinecone()\n",
"\n",
"# Create pinecone index if not exists otherwise skip this step\n",
"if not (pc.list_indexes()[0]['name'] == index_name):\n",
" pc.create_index(\n",
" name=index_name,\n",
" dimension=768, \n",
" metric=\"cosine\", \n",
" spec=ServerlessSpec(\n",
" cloud=\"aws\",\n",
" region=\"us-east-1\"\n",
" ) \n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 44,
"id": "92f6e244-302b-4b6e-b490-3678412ba589",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'dimension': 768,\n",
" 'index_fullness': 0.0,\n",
" 'namespaces': {'': {'vector_count': 1009}},\n",
" 'total_vector_count': 1009}"
]
},
"execution_count": 44,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"index = pc.Index(index_name)\n",
"index.describe_index_stats()"
]
},
{
"cell_type": "markdown",
"id": "c2034d80-b359-4240-a695-c959619fae4a",
"metadata": {},
"source": [
"## PineconeVectorStore\n",
"PineconeVectorStore to store and index high-dimensional vectors extracted from documents, leveraging embeddings provided by Hugging Face"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "874ddc8a-035e-436c-8a14-a7ed24fc4adb",
"metadata": {},
"outputs": [],
"source": [
"from langchain_pinecone import PineconeVectorStore\n",
"\n",
"pinecone = PineconeVectorStore.from_documents(\n",
" chunked_docs, embedding=embeddings, index_name=index_name\n",
")"
]
},
{
"cell_type": "markdown",
"id": "87e9990b-4982-44b6-9163-b12a3109e741",
"metadata": {},
"source": [
"## RAG\n",
"Retrieval Augmented Generation (RAG) provides the Large Language Model (LLM) with context and asks it to use that context to generate the response.\n",
"\n",
"This RAG implementation uses a vector database to store the text document embeddings (generated from the data in your knowledge base). Given a query, the code retrieves the relevant information from the Pinecone vector database and adds that text context to your prompt, which the LLM then uses to generate the response."
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "b6d3490a-40e9-48d5-a8f1-5e6571f7e036",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Langchain RAG pipeline set up successfully.\n"
]
}
],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain import hub\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"import os\n",
"\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"OPENAI_API_KEY\")\n",
"\n",
"retriever = pinecone.as_retriever()\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"\n",
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"rag_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"print(\"Langchain RAG pipeline set up successfully.\")"
]
},
{
"cell_type": "code",
"execution_count": 47,
"id": "f059fe04-bf3a-4bd8-b674-1f421f3bb5f4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The source data is about stock market information for Netflix (NFLX) on different dates, including details like opening price, closing price, high, low, adjusted close, and volume traded. The data includes specific dates ranging from 2018-10-31 to 2019-10-18. The information is extracted from a CSV file named NFLX.csv.\n"
]
}
],
"source": [
"print(rag_chain.invoke(\"What is the source data about?\"))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python (gdriveenv)",
"language": "python",
"name": ".venv-source-google-drive"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.19"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
================================================
FILE: pyairbyte_notebooks/rag_using_github_pyairbyte_weaviate.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"source": [
"# End-to-End RAG Tutorial Using Github, PyAirbyte, Weaviate\n",
"\n",
"In this notebook, we'll illustrate how to load data from GitHub into Weaviate using PyAirbyte and then retrieve it. Here we've used source-github and its 'issues' stream, but you can replace the source according to your requirements.\n",
"\n",
"## Prerequisites\n",
"\n",
"1. **Github**:\n",
" - Follow the instructions in the [Github Connector Docs](https://docs.airbyte.com/integrations/sources/github) to set up your github and get api_token.\n",
"\n",
"2. **Weaviate Account**:\n",
" - **Create a Weaviate Account**: Sign up for an account on the [Weaviate website](https://weaviate.io/).\n",
" - **Create a Cluster**: Follow this [instruction](https://weaviate.io/developers/weaviate/quickstart#step-1-create-a-weaviate-database) to create a database and obtain the Weaviate API_KEY and URL.\n",
"\n",
"3. **OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://www.openai.com/).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://beta.openai.com/docs/quickstart).\n",
"\n",
"\n",
"## Install PyAirbyte and other dependencies"
],
"metadata": {
"id": "5pBIRIujjIw4"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "4dv5cOvKi7xH"
},
"outputs": [],
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# First, we need to install the necessary libraries.\n",
"!pip3 install airbyte weaviate-client python-dotenv"
]
},
{
"cell_type": "markdown",
"source": [
"#Setup Source Github\n",
"\n",
"Note: The credentials are retrieved securely using the get_secret() method. This will automatically locate a matching Google Colab secret or environment variable, ensuring they are not hard-coded into the notebook. Make sure to add your key to the Secrets section on the left."
],
"metadata": {
"id": "p-S0HQbWlqiM"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wo8v-XcGi7xI"
},
"outputs": [],
"source": [
"import airbyte as ab\n",
"\n",
"source = ab.get_source(\n",
" \"source-github\",\n",
" config={\n",
" \"repositories\": ab.get_secret('GITHUB_REPOSITORY'),\n",
" \"credentials\": {\n",
" \"personal_access_token\": ab.get_secret('GITHUB_ACCESS_TOKEN'),\n",
" },\n",
" },\n",
")\n",
"source.check()"
]
},
{
"cell_type": "markdown",
"source": [
"Reads the data from the selected issues stream, extracting the GitHub issues data for further processing."
],
"metadata": {
"id": "mScNlHAMlzpR"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "FV9E_J09i7xJ"
},
"outputs": [],
"source": [
"# In this notebook we are focused on only issues stream\n",
"# checkout all streams here: https://docs.airbyte.com/integrations/sources/github#supported-streams\n",
"\n",
"print(source.get_available_streams())\n",
"source.select_streams([\"issues\"])\n",
"cache = ab.get_default_cache()\n",
"result = source.read(cache=cache,force_full_refresh=True)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "X45nwGPAi7xJ"
},
"outputs": [],
"source": [
"issues_details = result['issues'].to_pandas() # converting data from the issues stream to a pandas dataframe\n",
"\n",
"print(issues_details.columns)\n",
"print(issues_details[10])"
]
},
{
"cell_type": "markdown",
"source": [
"#Setting up Weaviate\n",
"Connect to the weaviate instance, Enter your weaviate cluster url and API_KEY"
],
"metadata": {
"id": "s7Z8OTeOmFS_"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "UyzpsIKPi7xK"
},
"outputs": [],
"source": [
"import weaviate\n",
"\n",
"client = weaviate.Client(\n",
" url = ab.get_secret('WCD_URL'), # Replace with your Weaviate endpoint\n",
" auth_client_secret=weaviate.auth.AuthApiKey(api_key=ab.get_secret('WCD_API_KEY')), # Replace with your Weaviate instance API key\n",
" additional_headers = {\n",
" \"X-OpenAI-Api-Key\": ab.get_secret('OPENAI_API_KEY') # Replace with your Openai API key\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"source": [
"Weaviate stores data in collections. Each data object in a collection has a set of properties and a vector representation."
],
"metadata": {
"id": "RcUwvnBHnLO0"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "CQm5WxEzi7xK"
},
"outputs": [],
"source": [
"collection_name = \"issues\" # name of collection\n",
"class_obj = {\n",
" \"class\": collection_name,\n",
" \"vectorizer\": \"text2vec-openai\", # If set to \"none\" you must always provide vectors yourself. Could be any other \"text2vec-*\" also.\n",
" \"moduleConfig\": {\n",
" \"text2vec-openai\": {},\n",
" \"generative-openai\": {} # Ensure the `generative-openai` module is used for generative queries\n",
" }\n",
"}\n",
"\n",
"client.schema.create_class(class_obj)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "vMxfH8r8i7xL"
},
"outputs": [],
"source": [
"\n",
"# Batch imports are an efficient way to add multiple data objects and cross-references.\n",
"client.batch.configure(batch_size=100) # Configure batch\n",
"\n",
"#The following example adds objects to the collection.\n",
"with client.batch as batch: # Initialize a batch process\n",
" for i,d in enumerate(issues_details): # Batch import data\n",
" properties = {\n",
" \"issue_details\": d, # You can also change property name here and also add multiple property\n",
" }\n",
" batch.add_data_object(\n",
" data_object=properties,\n",
" class_name=collection_name\n",
" )"
]
},
{
"cell_type": "markdown",
"source": [
"Weaviate has integrated generative search capabilities, so that the retrieval and generation steps are combined into a single query. This means that you can use Weaviate's search capabilities to retrieve the data you need, and then in the same query, prompt the LLM with the same data.\n",
"\n",
"This makes it easier, faster and more efficient to implement generative search workflows in your application.\n",
"\n",
"You can checkout more ways of query [here](https://weaviate.io/developers/weaviate/starter-guides/generative#data-retrieval)."
],
"metadata": {
"id": "ihPrmv3DqRL6"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "4YnIQV_pi7xL",
"outputId": "287b69a9-2789-40b7-b1e5-c03754068638"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/*\n",
"Summary of Pagination Handling in Github Connector issues in airbytehq/quickstarts repository:\n",
"\n",
"- Pagination handling in the Github Connector issues involves retrieving a limited number of issues at a time from the Github API and then using pagination to fetch the next set of issues.\n",
"- This ensures that large datasets of issues can be efficiently retrieved without overwhelming the API or the system.\n",
"- The Github Connector in the airbytehq/quickstarts repository likely implements pagination logic to handle the retrieval of issues in a systematic and efficient manner.\n",
"- Pagination parameters such as page number and page size are typically used to control the retrieval of issues in batches.\n",
"- Proper pagination handling is crucial for managing large volumes of data and ensuring smooth and efficient data retrieval from the Github API.\n",
"*/\n"
]
}
],
"source": [
"response = (\n",
" client.query\n",
" .get(class_name=collection_name, properties=[\"issue_details\"])\n",
" .with_near_text({\"concepts\": [\"title\",\"comments\"]})\n",
" .with_generate(single_prompt=\"Use {issue_details}, Give me summary of Pagination Handling in Github COnnector issues in airbytehq/quickstarts repository\") # do not forget to add USE {property_names} in prompt\n",
" .with_limit(1)\n",
" .do()\n",
")\n",
"\n",
"print(response[\"data\"][\"Get\"][\"Issues\"][0][\"_additional\"][\"generate\"][\"singleResult\"])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
},
"colab": {
"provenance": []
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/rag_using_gitlab_pyairbyte_qdrant.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "3s3Jow7_BjQi"
},
"source": [
"# End-to-End RAG Tutorial Using Gitlab, PyAirbyte, Qdrant, and LangChain\n",
"\n",
"This notebook demonstrates an end-to-end Retrieval-Augmented Generation (RAG) pipeline. We will extract data from GitLab using PyAirbyte, store it in a Qdrant vector store, and then use LangChain to perform RAG on the stored data. This workflow showcases how to integrate these tools to build a scalable RAG system.\n",
"\n",
"## Prerequisites\n",
"\n",
"1. **Gitlab Account**:\n",
" - Follow the instructions in the [Gitlab Docs](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token) to set up your gitlab account and obtain the necessary access token.\n",
"\n",
"2. **Qdrant Account**:\n",
" - **Create a Qdrant Account**: Sign up for an account on the Qdrant [website](https://qdrant.tech/)\n",
" - **Create Cluster**: Open the Qdrant dashboard and establish a new cluster. After building a new cluster, you will see an option for creating API_key; copy the URL and API_key from there.\n",
"\n",
"3. **OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://www.openai.com/).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://beta.openai.com/docs/quickstart).\n",
"\n",
"\n",
"## Install PyAirbyte and other dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "XclQfDX9MQsw"
},
"outputs": [],
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# First, we need to install the necessary libraries.\n",
"!pip3 install airbyte langchain langchain-openai qdrant-client python-dotenv langchainhub"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Hzira-9BQq0h"
},
"source": [
"## Setup Source Gitlab with PyAirbyte\n",
"\n",
"The provided code configures an Airbyte source to extract data from GitLab.\n",
"\n",
"To configure according to your requirements, you can refer to [this reference](https://docs.airbyte.com/integrations/sources/gitlab#reference).\n",
"\n",
"Note: The credentials are retrieved securely using the get_secret() method. This will automatically locate a matching Google Colab secret or environment variable, ensuring they are not hard-coded into the notebook. Make sure to add your key to the Secrets section on the left.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "-2pgyG5aMGq0"
},
"outputs": [],
"source": [
"import airbyte as ab\n",
"\n",
"source = ab.get_source(\n",
" \"source-gitlab\",\n",
" config={\n",
" \"credentials\":{\n",
" \"auth_type\":\"access_token\",\n",
" \"access_token\": ab.get_secret(\"GITLAB_ACCESS_TOKEN\"),\n",
" },\n",
" \"projects\" :ab.get_secret(\"GITLAB_PROJECT\")\n",
" }\n",
")\n",
"source.check()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "_Phdo7l_MGq2"
},
"outputs": [],
"source": [
"# In this notebook we are focused on only issues stream\n",
"# checkout all stream here : https://docs.airbyte.com/integrations/sources/gitlab#supported-streams\n",
"\n",
"source.get_available_streams()\n",
"source.select_streams([\"issues\"])\n",
"cache = ab.get_default_cache()\n",
"result = source.read(cache=cache)\n",
"\n",
"issues_details = [doc for doc in result[\"issues\"].to_documents()] # Fetching data for issues stream only\n",
"\n",
"print(str(issues_details[10]))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "jJ5Na_O2Sn1U"
},
"source": [
"# Use Langchain to build a RAG pipeline.\n",
"\n",
"The code uses RecursiveCharacterTextSplitter to break documents into smaller chunks. Metadata within these chunks is converted to strings. This facilitates efficient processing of large texts, enhancing analysis capabilities."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wravAgJhMGq3"
},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)\n",
"chunked_docs = splitter.split_documents(issues_details)\n",
"print(f\"Created {len(chunked_docs)} document chunks.\")\n",
"\n",
"for doc in chunked_docs:\n",
" for md in doc.metadata:\n",
" doc.metadata[md] = str(doc.metadata[md])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"id": "tcXR48fsMGq4"
},
"outputs": [],
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"import os\n",
"\n",
"## Embedding Technique Of OPENAI\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"OPENAI_API_KEY\")\n",
"embeddings=OpenAIEmbeddings()\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Mh_lGwiJUkLg"
},
"source": [
"## Setting up Qdrant\n",
"\n",
"Qdrant is a leading open-source vector database and similarity search engine designed to handle high-dimensional vectors for high-performance and massive-scale AI applications.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "uZXgQF-xMGq4"
},
"outputs": [],
"source": [
"from qdrant_client import QdrantClient, models\n",
"\n",
"client = QdrantClient(\n",
" location=ab.get_secret(\"QDRANT_URL\"), # As obtained above\n",
" api_key=ab.get_secret(\"QDRANT_API_KEY\"),\n",
")\n",
"\n",
"collection_name = \"gitlab_issue\" # Give collection a name\n",
"client.create_collection(\n",
" collection_name=collection_name,\n",
" vectors_config=models.VectorParams(\n",
" size=1536, # vector dimensions\n",
" distance=models.Distance.COSINE,\n",
" ),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "nU9KjhMHMGq4"
},
"outputs": [],
"source": [
"from langchain.vectorstores.qdrant import Qdrant\n",
"\n",
"qdrant = Qdrant(\n",
" client=client,\n",
" collection_name=collection_name,\n",
" embeddings=embeddings,\n",
")\n",
"\n",
"qdrant.add_documents(chunked_docs, batch_size=20)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8zwaoOteU7oi"
},
"source": [
"Now setting up a pipeline for RAG using LangChain, incorporating document retrieval from Qdrant, prompt configuration, and a chat model from OpenAI for response generation."
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"id": "y2e-raMYMGq4",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "418f679a-dafe-4f21-d443-54e73e8c1fde"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Langchain RAG pipeline set up successfully.\n"
]
}
],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain import hub\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"retriever = qdrant.as_retriever()\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"rag_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"print(\"Langchain RAG pipeline set up successfully.\")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"id": "Ihbo8bllMGq4",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "feec51cc-2cfd-4634-e075-baa571b52b5f"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"The programming languages mentioned in the context are Java and JavaScript.\n"
]
}
],
"source": [
"print(rag_chain.invoke(\"Which programming languages are mentioned in issues most?\"))"
]
}
],
"metadata": {
"colab": {
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "myenv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/rag_using_jira_pyairbyte_pinecone.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"id": "82f45520-6a6d-49b2-9dfd-ea33231e7b39",
"metadata": {},
"source": [
"# End-to-End RAG Tutorial Using Jira, PyAirbyte, Pinecone, and LangChain\n",
"\n",
"This notebook demonstrates an end-to-end Retrieval-Augmented Generation (RAG) pipeline. We will extract data from Jira using PyAirbyte, store it in a Pinecone vector store, and then use LangChain to perform RAG on the stored data. This workflow showcases how to integrate these tools to build a scalable RAG system.\n",
"\n",
"## Prerequisites\n",
"\n",
"1. **Jira**:\n",
" - Follow the instructions in the [Jira Source Connector Documentation](https://docs.airbyte.com/integrations/sources/jira) to set up your Jira Airbyte source\n",
"\n",
"2. **Pinecone Account**:\n",
" - **Create a Pinecone Account**: Sign up for an account on the [Pinecone website](https://www.pinecone.io/).\n",
" - **Obtain Pinecone API Key**: Generate a new API key from your Pinecone project settings. For detailed instructions, refer to the [Pinecone documentation](https://docs.pinecone.io/docs/quickstart).\n",
"\n",
"3. **OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://www.openai.com/).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://beta.openai.com/docs/quickstart).\n"
]
},
{
"cell_type": "markdown",
"id": "318e598e-10b5-4fd3-891f-e316cbc4a341",
"metadata": {},
"source": [
"## Install PyAirbyte and other dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3954735e-6c45-4d15-b2a2-4fc705c935b7",
"metadata": {},
"outputs": [],
"source": [
"!pip3 install airbyte openai langchain pinecone-client langchain-openai langchain-pinecone langchainhub "
]
},
{
"cell_type": "markdown",
"id": "b5f0ec02-f31c-44ff-ac7e-49af6f608c9e",
"metadata": {},
"source": [
"# Setup Source Jira with PyAirbyte\n",
"\n",
"The provided code configures an Airbyte source to extract issues data from Jira.\n",
"\n",
"To configure according to your requirements, you can refer to [this reference](https://docs.airbyte.com/integrations/sources/jira#reference).\n",
"\n",
"Note: The credentials are retrieved securely using the get_secret() method. This will automatically locate a matching Google Colab secret or environment variable, ensuring they are not hard-coded into the notebook. Make sure to add your key to the Secrets section on the left.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "936c79e8-2eff-4d4a-ae51-74d8f14c7005",
"metadata": {},
"outputs": [],
"source": [
"import airbyte as ab\n",
"import json\n",
"\n",
"projects = json.loads(ab.get_secret('projects_list'))\n",
"\n",
"source = ab.get_source(\n",
" \"source-jira\",\n",
" install_if_missing=True,\n",
" config={\n",
" \"api_token\": ab.get_secret('jira_api_token'),\n",
" \"domain\": ab.get_secret('jira_domain') ,\n",
" \"email\": ab.get_secret('jira_email_id'),\n",
" \"start_date\": \"2021-01-01T00:00:00Z\", # optional field, can be ignored \n",
" \"projects\": projects\n",
" },\n",
"\n",
")\n",
"\n",
"# Verify the config and creds by running `check`:\n",
"source.check()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "89205323-0676-4869-bcf6-fbbb31333a5a",
"metadata": {},
"outputs": [],
"source": [
"source.select_streams(['issues']) # Select only issues stream\n",
"read_result: ab.ReadResult = source.read()\n",
"documents_list = []\n",
"\n",
"for key, value in read_result.items():\n",
" docs = value.to_documents()\n",
" for doc in docs:\n",
" documents_list.append(doc)\n",
"\n",
"print(str(documents_list))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6c76d164-8894-41b6-aee6-a3de0a70aa26",
"metadata": {},
"outputs": [],
"source": [
"# store and display the issues stream in data frame\n",
"issues_df = read_result[\"issues\"].to_pandas()\n",
"display(issues_df)"
]
},
{
"cell_type": "markdown",
"id": "770f07e7-9dd6-4bd3-b180-fc322eb1e209",
"metadata": {},
"source": [
"## Use Langchain to build a RAG pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8194e7ba-4aef-4fdf-afef-93a5dee0e36c",
"metadata": {},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.vectorstores.utils import filter_complex_metadata\n",
"\n",
"\n",
"\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)\n",
"chunked_docs = splitter.split_documents(documents_list)\n",
"chunked_docs = filter_complex_metadata(chunked_docs)\n",
"print(f\"Created {len(chunked_docs)} document chunks.\")\n",
"\n",
"for doc in chunked_docs:\n",
" for md in doc.metadata:\n",
" doc.metadata[md] = str(doc.metadata[md])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01eeea04-f774-4a6b-840a-999aed9f80ec",
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"import os\n",
"\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"OPENAI_API_KEY\")\n",
"\n",
"embeddings=OpenAIEmbeddings()"
]
},
{
"cell_type": "markdown",
"id": "6ac9b188-2972-42b4-a9bb-7d752b0eeaf3",
"metadata": {},
"source": [
"## Setting up Pinecone\n",
"\n",
"Pinecone is a managed vector database service designed for storing, indexing, and querying high-dimensional vector data efficiently."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1f11593-3ea2-4762-ad23-e21c87d44ece",
"metadata": {},
"outputs": [],
"source": [
"from pinecone import Pinecone, ServerlessSpec\n",
"os.environ['PINECONE_API_KEY'] = ab.get_secret(\"PINECONE_API_KEY\")\n",
"\n",
"index_name = \"airbytejiraindex\"\n",
"\n",
"pc = Pinecone()\n",
"\n",
"# Create pinecone index if not exists otherwise skip this step\n",
"if index_name not in [index[\"name\"] for index in pc.list_indexes()]:\n",
" pc.create_index(\n",
" name=index_name,\n",
" dimension=1536,\n",
" metric=\"cosine\",\n",
" spec=ServerlessSpec(\n",
" cloud=\"aws\",\n",
" region=\"us-east-1\"\n",
" )\n",
" )\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "de7efdc6-4363-4c3e-bd6e-86b955f2e505",
"metadata": {},
"outputs": [],
"source": [
"index = pc.Index(index_name)\n",
"\n",
"index.describe_index_stats()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "98bdb85f-be90-47b0-a778-9232c81427c1",
"metadata": {},
"outputs": [],
"source": [
"from langchain_pinecone import PineconeVectorStore\n",
"\n",
"pinecone = PineconeVectorStore.from_documents(\n",
" chunked_docs, embedding=embeddings, index_name=index_name\n",
")"
]
},
{
"cell_type": "markdown",
"id": "6f5d31fa-6f88-4755-9405-6f7c84495607",
"metadata": {},
"source": [
"## RAG"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "beb7f799-3558-49f4-8c20-2a45ac61c034",
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain import hub\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"retriever = pinecone.as_retriever()\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"OPENAI_API_KEY\")\n",
"\n",
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
"\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"rag_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"print(\"Langchain RAG pipeline set up successfully.\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ae3dd498-74a3-4868-8529-bdad7d949d46",
"metadata": {},
"outputs": [],
"source": [
"print(rag_chain.invoke(\"Summarize the issue of key IT-20\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f62b7761-c463-430d-8150-3e816897cad9",
"metadata": {},
"outputs": [],
"source": [
"print(rag_chain.invoke(\"What is the source data about?\"))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.19"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
================================================
FILE: pyairbyte_notebooks/rag_using_s3_pyairbyte_pinecone.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "3s3Jow7_BjQi"
},
"source": [
"# End-to-End RAG Tutorial Using S3, PyAirbyte, Pinecone, and LangChain\n",
"\n",
"This notebook demonstrates an end-to-end Retrieval-Augmented Generation (RAG) pipeline. We will extract data from an S3 bucket using PyAirbyte, store it in a Pinecone vector store, and then use LangChain to perform RAG on the stored data. This workflow showcases how to integrate these tools to build a scalable RAG system.\n",
"\n",
"## Prerequisites\n",
"\n",
"1. **AWS S3 Bucket**:\n",
" - Follow the instructions in the [AWS S3 Source Connector Documentation](https://docs.airbyte.com/integrations/sources/s3) to set up your S3 bucket and obtain the necessary access keys.\n",
"\n",
"2. **Pinecone Account**:\n",
" - **Create a Pinecone Account**: Sign up for an account on the [Pinecone website](https://www.pinecone.io/).\n",
" - **Obtain Pinecone API Key**: Generate a new API key from your Pinecone project settings. For detailed instructions, refer to the [Pinecone documentation](https://docs.pinecone.io/docs/quickstart).\n",
"\n",
"3. **OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://platform.openai.com/docs/overview).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://platform.openai.com/api-keys).\n",
"\n",
"\n",
"## Install PyAirbyte and other dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ij3THvimBjQk"
},
"outputs": [],
"source": [
"# Add virtual environment support in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# First, we need to install the necessary libraries.\n",
"!pip3 install airbyte openai langchain pinecone-client langchain-openai langchain-pinecone python-dotenv langchainhub"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8jDsEZdiBjQl"
},
"source": [
"## Setup Source S3 with PyAirbyte\n",
"\n",
"The provided code configures an Airbyte source to extract data from an Amazon S3 bucket containing CSV files.\n",
"\n",
"To configure according to your requirements, you can refer to [this reference](https://docs.airbyte.com/integrations/sources/s3#reference).\n",
"\n",
"Note: The credentials are retrieved securely using the get_secret() method. This will automatically locate a matching Google Colab secret or environment variable, ensuring they are not hard-coded into the notebook. Make sure to add your key to the Secrets section on the left.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "U7DxyLVUBjQl"
},
"outputs": [],
"source": [
"import airbyte as ab\n",
"\n",
"source = ab.get_source(\n",
" \"source-s3\",\n",
" config={\n",
" \"streams\": [\n",
" {\n",
" \"name\": \"\",\n",
" \"format\": {\n",
" \"filetype\": \"csv\",\n",
" \"ignore_errors_on_fields_mismatch\": True,\n",
" },\n",
" \"globs\": [\"**\"],\n",
" \"legacy_prefix\": \"\",\n",
" \"validation_policy\": \"Emit Record\",\n",
" }\n",
" ],\n",
" \"bucket\": ab.get_secret(\"S3_BUCKET_NAME\"),\n",
" \"aws_access_key_id\": ab.get_secret(\"AWS_ACCESS_KEY\"),\n",
" \"aws_secret_access_key\": ab.get_secret(\"AWS_SECRET_KEY\"),\n",
" \"region_name\": ab.get_secret(\"AWS_REGION\")\n",
" }\n",
")\n",
"source.check()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "rtSCjGcQBjQl"
},
"source": [
"This is a basic process of fetching data from an S3 bucket using Airbyte and converting it into a format suitable for further processing or analysis."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "mlg7K8GUBjQm"
},
"outputs": [],
"source": [
"source.select_all_streams() # Select all streams\n",
"read_result = source.read() # Read the data\n",
"documents_list = [doc for value in read_result.values() for doc in value.to_documents()]\n",
"\n",
"print(str(documents_list[10]))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "L6KM7KPMBjQm"
},
"source": [
"# Use Langchain to build a RAG pipeline.\n",
"\n",
"The code uses RecursiveCharacterTextSplitter to break documents into smaller chunks. Metadata within these chunks is converted to strings. This facilitates efficient processing of large texts, enhancing analysis capabilities."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "LXvSJoUSBjQm"
},
"outputs": [],
"source": [
"\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)\n",
"chunked_docs = splitter.split_documents(documents_list)\n",
"print(f\"Created {len(chunked_docs)} document chunks.\")\n",
"\n",
"for doc in chunked_docs:\n",
" for md in doc.metadata:\n",
" doc.metadata[md] = str(doc.metadata[md])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "kCF7gZTMBjQm"
},
"outputs": [],
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"import os\n",
"\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"OPENAI_API_KEY\")\n",
"## Embedding Technique Of OPENAI\n",
"embeddings=OpenAIEmbeddings()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "5TKCIAhYBjQm"
},
"source": [
"## Setting up Pinecone\n",
"\n",
"Pinecone is a managed vector database service designed for storing, indexing, and querying high-dimensional vector data efficiently."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "iXb5YPnhBjQn"
},
"outputs": [],
"source": [
"from pinecone import Pinecone, ServerlessSpec\n",
"\n",
"os.environ['PINECONE_API_KEY'] = ab.get_secret(\"PINECONE_API_KEY\")\n",
"pc = Pinecone()\n",
"index_name = \"s3-quickstarts-index\" # Replace with your index name\n",
"\n",
"\n",
"# Uncomment this if you have not created a Pinecone index yet\n",
"\n",
"# spec = ServerlessSpec(cloud=\"aws\", region=\"us-east-1\") # Replace with your cloud and region\n",
"# pc.create_index(\n",
"# name=index_name,\n",
"# dimension=1536, # Replace with your model dimensions\n",
"# metric='cosine', # Replace with your model metric\n",
"# spec=spec\n",
"# )\n",
"\n",
"index = pc.Index(index_name)\n",
"\n",
"index.describe_index_stats()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6kRv6s7zBjQn"
},
"source": [
"PineconeVectorStore is a class provided by the LangChain library specifically designed for interacting with Pinecone vector stores.\n",
"from_documents method of PineconeVectorStore is used to create or update vectors in a Pinecone vector store based on the provided documents and their corresponding embeddings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "3hToKOPsBjQn"
},
"outputs": [],
"source": [
"from langchain_pinecone import PineconeVectorStore\n",
"\n",
"pinecone = PineconeVectorStore.from_documents(\n",
" chunked_docs, embeddings, index_name=index_name\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "vj0hSWo2BjQn"
},
"source": [
"Now setting up a pipeline for RAG using LangChain, incorporating document retrieval from Pinecone, prompt configuration, and a chat model from OpenAI for response generation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "N0gE_LbmBjQn"
},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain import hub\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"retriever = pinecone.as_retriever()\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"rag_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"print(\"Langchain RAG pipeline set up successfully.\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "axbwi9j8BjQn"
},
"outputs": [],
"source": [
"print(rag_chain.invoke(\"What are some best documentaries to watch?\"))\n"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "myenv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/rag_using_shopify_pyairbyte_langchain.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "3s3Jow7_BjQi"
},
"source": [
"# End-to-End RAG Tutorial Using Shopify, PyAirbyte, Pinecone, and LangChain\n",
"\n",
"This notebook demonstrates an end-to-end Retrieval-Augmented Generation (RAG) pipeline. We will extract data from a Shopify store using PyAirbyte, store it in a Pinecone vector store, and then use LangChain to perform RAG on the stored data. This workflow showcases how to integrate these tools to build a scalable RAG system.\n",
"\n",
"## Prerequisites\n",
"\n",
"1. **Shopify**:\n",
" - Follow the instructions in the [Shopify Connector Docs](https://docs.airbyte.com/integrations/sources/shopify#setup-guide) to set up your Shopify and obtain the necessary access keys.\n",
"\n",
"2. **Pinecone Account**:\n",
" - **Create a Pinecone Account**: Sign up for an account on the [Pinecone website](https://www.pinecone.io/).\n",
" - **Obtain Pinecone API Key**: Generate a new API key from your Pinecone project settings. For detailed instructions, refer to the [Pinecone documentation](https://docs.pinecone.io/docs/quickstart).\n",
"\n",
"3. **OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://www.openai.com/).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://beta.openai.com/docs/quickstart).\n",
"\n",
"\n",
"## Install PyAirbyte and other dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "XclQfDX9MQsw"
},
"outputs": [],
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# First, we need to install the necessary libraries.\n",
"!pip3 install airbyte openai langchain pinecone-client langchain-openai langchain-pinecone python-dotenv langchainhub"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Hzira-9BQq0h"
},
"source": [
"## Setup Source Shopify with PyAirbyte\n",
"\n",
"The provided code configures an Airbyte source to extract data from a Shopify store.\n",
"\n",
"To configure according to your requirements, you can refer to [this reference](https://docs.airbyte.com/integrations/sources/shopify#reference).\n",
"\n",
"Note: The credentials are retrieved securely using the get_secret() method. This will automatically locate a matching Google Colab secret or environment variable, ensuring they are not hard-coded into the notebook. Make sure to add your key to the Secrets section on the left.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "-2pgyG5aMGq0",
"outputId": "f20c47bf-1dcb-42bd-ddbb-a7b1ab939afe"
},
"outputs": [],
"source": [
"import airbyte as ab\n",
"\n",
"source = ab.get_source(\n",
" \"source-shopify\",\n",
" config={\n",
"\n",
" \"credentials\":{\n",
" # There are two methods available for authentication 'api_password' and 'oauth2.0',\n",
" # Choose one of them (https://docs.airbyte.com/integrations/sources/shopify#airbyte-open-source)\n",
" \"auth_method\": \"api_password\",\n",
" \"api_password\": ab.get_secret(\"API_PASSWORD\")\n",
" },\n",
" \"shop\":ab.get_secret(\"STORE_NAME\"),\n",
" \"start_date\": ab.get_secret(\"START_DATE\"),\n",
" \"bulk_window_in_days\": 30, # change this according to your requirement (Defines what would be a date range per single BULK Job)\n",
" \"fetch_transactions_user_id\": False\n",
" }\n",
")\n",
"source.check()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "GO0QI-qNRNyz"
},
"source": [
"This is a basic process of fetching data from Shopify using Airbyte and converting it into a format suitable for further processing or analysis."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "_Phdo7l_MGq2"
},
"outputs": [],
"source": [
"# List the available streams available for the Shopify source\n",
"# Available Stream for shopify here(https://docs.airbyte.com/integrations/sources/shopify#supported-streams)\n",
"source.get_available_streams()\n",
"# Select the streams we are interested in loading to cache\n",
"source.select_streams([\"products\", \"product_variants\", \"collections\", \"customers\"])\n",
"cache = ab.get_default_cache()\n",
"result = source.read(cache=cache)\n",
"\n",
"product_details = [doc for doc in result[\"products\"].to_documents()] # Fetching data for products stream only\n",
"\n",
"print(str(product_details[10]))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "jJ5Na_O2Sn1U"
},
"source": [
"# Use Langchain to build a RAG pipeline.\n",
"\n",
"The code uses RecursiveCharacterTextSplitter to break documents into smaller chunks. Metadata within these chunks is converted to strings. This facilitates efficient processing of large texts, enhancing analysis capabilities."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wravAgJhMGq3"
},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)\n",
"chunked_docs = splitter.split_documents(product_details)\n",
"print(f\"Created {len(chunked_docs)} document chunks.\")\n",
"\n",
"for doc in chunked_docs:\n",
" for md in doc.metadata:\n",
" doc.metadata[md] = str(doc.metadata[md])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "tcXR48fsMGq4"
},
"outputs": [],
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"import os\n",
"\n",
"## Embedding Technique Of OPENAI\n",
"os.environ['OPENAI_API_KEY'] = ab.get_secret(\"OPENAI_API_KEY\")\n",
"embeddings=OpenAIEmbeddings()\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Mh_lGwiJUkLg"
},
"source": [
"## Setting up Pinecone\n",
"\n",
"Pinecone is a managed vector database service designed for storing, indexing, and querying high-dimensional vector data efficiently.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "uZXgQF-xMGq4"
},
"outputs": [],
"source": [
"from pinecone import Pinecone, ServerlessSpec\n",
"import os\n",
"\n",
"os.environ['PINECONE_API_KEY'] = ab.get_secret(\"PINECONE_API_KEY\")\n",
"pc = Pinecone()\n",
"index_name = \"shopifyproductsindex\" # Replace with your index name\n",
"\n",
"\n",
"# Uncomment this if you have not created a Pinecone index yet\n",
"\n",
"# spec = ServerlessSpec(cloud=\"aws\", region=\"us-east-1\") #Replace with your cloud and region\n",
"# pc.create_index(\n",
"# name=index_name,\n",
"# dimension=1536,\n",
"# metric='cosine',\n",
"# spec=spec\n",
"# )\n",
"\n",
"index = pc.Index(index_name)\n",
"\n",
"index.describe_index_stats()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "evdVEEsTUwhE"
},
"source": [
"PineconeVectorStore is a class provided by the LangChain library specifically designed for interacting with Pinecone vector stores.\n",
"from_documents method of PineconeVectorStore is used to create or update vectors in a Pinecone vector store based on the provided documents and their corresponding embeddings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "nU9KjhMHMGq4"
},
"outputs": [],
"source": [
"from langchain_pinecone import PineconeVectorStore\n",
"\n",
"pinecone = PineconeVectorStore.from_documents(\n",
" chunked_docs, embeddings, index_name=index_name\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8zwaoOteU7oi"
},
"source": [
"Now setting up a pipeline for RAG using LangChain, incorporating document retrieval from Pinecone, prompt configuration, and a chat model from OpenAI for response generation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "y2e-raMYMGq4"
},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain import hub\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"retriever = pinecone.as_retriever()\n",
"prompt = hub.pull(\"rlm/rag-prompt\")\n",
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"rag_chain = (\n",
" {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"print(\"Langchain RAG pipeline set up successfully.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Ihbo8bllMGq4"
},
"outputs": [],
"source": [
"print(rag_chain.invoke(\"What type of products do we sell?\"))"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "myenv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: pyairbyte_notebooks/rag_with_fb_marketing_milvus_lite.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook demonstrates simple RAG (Retrieval-Augmented Generation) pipeline with Facebook Marketing, Milvus Lite and PyAirbyte."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"- [PyAirbyte](https://airbyte.com/product/pyairbyte)\n",
" \n",
" PyAirbyte is an open-source that packages Airbyte connectors and makes them available in Python. We will connect to \n",
" `source-facebook-marketing`, and retrieve its streams\n",
"- [Milvus Lite](https://milvus.io/docs/milvus_lite.md)\n",
" \n",
" Milvus Lite is the lightweight version of [Milvus](https://github.com/milvus-io/milvus) that enables vector embeddings and similarity search\n",
" into the Python application.\n",
"- OpenAI API Key\n",
" \n",
" Go to the [API Keys page](https://platform.openai.com/api-keys) to create the new secret key."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 1. Install and set dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install airbyte pymilvus openai milvus-model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"import airbyte as ab\n",
"from openai import OpenAI\n",
"from pymilvus import MilvusClient\n",
"\n",
"\n",
"# in production, you might want to avoid putting the key here.\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-proj-****\"\n",
"openai_client = OpenAI()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2. Set the source"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Connect `source-facebook-marketing` to fetch streams"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"source = ab.get_source(\n",
" \"source-facebook-marketing\",\n",
" config={\n",
" \"start_date\": \"2024-06-01T00:00:00Z\",\n",
" \"account_id\": \"account\",\n",
" \"access_token\": \"token\"\n",
" },\n",
" install_if_missing=True,\n",
")\n",
"source.check()\n",
"source.select_all_streams()\n",
"result = source.read()\n",
"\n",
"for name, records in result.streams.items():\n",
" print(f\"Streams of {name} has {len(records)} records.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3. Milvus Lite & Text Embedding"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"type\": \"DEBUG\", \"message\": \"Created new connection using: 5a7d992d451b41db831d254213b64892\", \"data\": {}}\n"
]
}
],
"source": [
"milvus_client = MilvusClient(\"./milvus_source_fake.db\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This will create the `milvus_source_fake.db` if this is the first initialization. There are some [limitations](https://milvus.io/docs/milvus_lite.md#Limits), but this quick setup for local development should be enough to test the pipeline."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's focus on getting the `products` data. We will keep it simple by just getting the relevant fields:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"data = []\n",
"for record in result.streams[\"products\"]:\n",
" make = record[\"make\"]\n",
" model = record[\"model\"]\n",
" year = record[\"year\"]\n",
"\n",
" text = f\"{make} {model} {year}\"\n",
" data.append(text)\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['Mazda MX-5 2023',\n",
" 'Mercedes-Benz C-Class 2023',\n",
" 'Honda Accord Crosstour 2023',\n",
" 'GMC Jimmy 2023',\n",
" 'Infiniti FX 2023']"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data[:5]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from pymilvus import model\n",
"\n",
"openai_ef = model.dense.OpenAIEmbeddingFunction(\n",
" model_name='text-embedding-3-large', # Specify the model name\n",
" api_key=os.environ[\"OPENAI_API_KEY\"], # Provide your OpenAI API key\n",
" dimensions=512 # Set the embedding dimensionality\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"embedded_data = openai_ef.encode_documents(data)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"type\": \"DEBUG\", \"message\": \"Successfully created collection: products\", \"data\": {}}\n",
"{\"type\": \"DEBUG\", \"message\": \"Successfully created an index on collection: products\", \"data\": {}}\n"
]
}
],
"source": [
"milvus_client.create_collection(\"products\", dimension=512)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"embedded_docs = []\n",
"for _id, embedded_text in enumerate(embedded_data):\n",
" embedded_docs.append({\"id\": _id+1, \"vector\": embedded_text, \"text\": data[_id]})"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'insert_count': 50,\n",
" 'ids': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50],\n",
" 'cost': 0}"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"milvus_client.insert(collection_name=\"products\", data=embedded_docs)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 4. Inspect the search results"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"question = \"Give list of products from Suzuki\""
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"search_res = milvus_client.search(\n",
" collection_name=\"products\",\n",
" data=[\n",
" openai_ef.encode_documents([question])[0]\n",
" ], # Encode the question into an embedding vector with the OpenAI embedding function\n",
" limit=3, # Return top 3 results\n",
" search_params={\"metric_type\": \"COSINE\", \"params\": {}}, # Cosine similarity metric\n",
" output_fields=[\"text\"], # Return the text field\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" [\n",
" \"Suzuki SJ 410 2023\",\n",
" 0.5219288468360901\n",
" ],\n",
" [\n",
" \"Isuzu VehiCROSS 2023\",\n",
" 0.38782158493995667\n",
" ],\n",
" [\n",
" \"Jaguar S-Type 2023\",\n",
" 0.35628464818000793\n",
" ]\n",
"]\n"
]
}
],
"source": [
"retrieved_lines_with_distances = [\n",
" (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n",
"]\n",
"print(json.dumps(retrieved_lines_with_distances, indent=4))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 5. Use OpenAI ChatGPT to get the RAG response"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's define the system and user prompts for the Language Model. This prompt is assembled with the retrieved documents from Milvus.\n",
"\n",
"We also use OpenAI ChatGPT to generate a response based on the prompts."
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"context = \"\\n\".join(\n",
" [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"SYSTEM_PROMPT = \"\"\"\n",
"Human: You are an AI assistant. You are able to find answers to the questions from the contextual passage snippets provided.\n",
"\"\"\"\n",
"USER_PROMPT = f\"\"\"\n",
"Use the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n",
"\n",
"{context}\n",
"\n",
"\n",
"{question}\n",
"\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Suzuki SJ 410 2023\n",
"\n"
]
}
],
"source": [
"response = openai_client.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
" {\"role\": \"user\", \"content\": USER_PROMPT},\n",
" ],\n",
")\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Summary\n",
"\n",
"This shows how easy it is to build a RAG pipeline in Python for quick local development, which helps us speed up our development iterations. All within the comfort of a Python environment."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "pyairbyte-hackathon",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
================================================
FILE: pyairbyte_notebooks/rag_with_pyairbyte_and_milvus_lite.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook demonstrates a simple RAG (Retrieval-Augmented Generation) pipeline with Milvus Lite and PyAirbyte.\n",
"The focus is to showcase how to set these components for a fully local development in Python."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prerequisites\n",
"- [PyAirbyte](https://airbyte.com/product/pyairbyte)\n",
" \n",
" PyAirbyte is an open-source library that packages Airbyte connectors and makes them available in Python. In this tutorial, we will just use the \n",
" `source-faker`, but it's easy to set it up for other sources.\n",
"- [Milvus Lite](https://milvus.io/docs/milvus_lite.md)\n",
" \n",
" Milvus Lite is the lightweight version of [Milvus](https://github.com/milvus-io/milvus) that enables vector embeddings and similarity search\n",
" into the Python application.\n",
"- OpenAI API Key\n",
" \n",
" Go to the [API Keys page](https://platform.openai.com/api-keys) to create the new secret key."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 1. Install and set dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install airbyte pymilvus openai milvus-model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"import airbyte as ab\n",
"from openai import OpenAI\n",
"from pymilvus import MilvusClient\n",
"\n",
"\n",
"# in production, you might want to avoid putting the key here.\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-proj-****\"\n",
"openai_client = OpenAI()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2. Set the source"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For simplicity, we will just use `source-faker` to generate some data."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"## Read Progress\n",
"\n",
"Started reading at 08:19:11.\n",
"\n",
"Read **150** records over **1 seconds** (150.0 records / second).\n",
"\n",
"Wrote **150** records over 3 batches.\n",
"\n",
"Finished reading at 08:19:13.\n",
"\n",
"Started finalizing streams at 08:19:13.\n",
"\n",
"Finalized **3** batches over 1 seconds.\n",
"\n",
"Completed 3 out of 3 streams:\n",
"\n",
" - products\n",
" - users\n",
" - purchases\n",
"\n",
"\n",
"Completed writing at 08:19:14. Total time elapsed: 3 seconds\n",
"\n",
"\n",
"------------------------------------------------\n"
],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"
Completed `source-faker` read operation at 15:19:14.\n",
"
\n"
],
"text/plain": [
"Completed `source-faker` read operation at \u001b[1;92m15:19:14\u001b[0m.\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Streams of products has 50 records.\n",
"Streams of users has 50 records.\n",
"Streams of purchases has 50 records.\n"
]
}
],
"source": [
"source = ab.get_source(\n",
" \"source-faker\",\n",
" config={\"count\": 5_0},\n",
" install_if_missing=True,\n",
")\n",
"source.check()\n",
"source.select_all_streams()\n",
"result = source.read()\n",
"\n",
"for name, records in result.streams.items():\n",
" print(f\"Streams of {name} has {len(records)} records.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we can see there are streams of `products`, `users`, and `purchases`. All of them have 50 records."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3. Milvus Lite & Text Embedding"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"type\": \"DEBUG\", \"message\": \"Created new connection using: 5a7d992d451b41db831d254213b64892\", \"data\": {}}\n"
]
}
],
"source": [
"milvus_client = MilvusClient(\"./milvus_source_fake.db\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This will create the `milvus_source_fake.db` if this is the first initialization. There are some [limitations](https://milvus.io/docs/milvus_lite.md#Limits), but this quick setup for local development should be enough to test the pipeline."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's focus on getting the `products` data. We will keep it simple by just getting the relevant fields:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"data = []\n",
"for record in result.streams[\"products\"]:\n",
" make = record[\"make\"]\n",
" model = record[\"model\"]\n",
" year = record[\"year\"]\n",
"\n",
" text = f\"{make} {model} {year}\"\n",
" data.append(text)\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['Mazda MX-5 2023',\n",
" 'Mercedes-Benz C-Class 2023',\n",
" 'Honda Accord Crosstour 2023',\n",
" 'GMC Jimmy 2023',\n",
" 'Infiniti FX 2023']"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data[:5]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from pymilvus import model\n",
"\n",
"openai_ef = model.dense.OpenAIEmbeddingFunction(\n",
" model_name='text-embedding-3-large', # Specify the model name\n",
" api_key=os.environ[\"OPENAI_API_KEY\"], # Provide your OpenAI API key\n",
" dimensions=512 # Set the embedding dimensionality\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"embedded_data = openai_ef.encode_documents(data)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"type\": \"DEBUG\", \"message\": \"Successfully created collection: products\", \"data\": {}}\n",
"{\"type\": \"DEBUG\", \"message\": \"Successfully created an index on collection: products\", \"data\": {}}\n"
]
}
],
"source": [
"milvus_client.create_collection(\"products\", dimension=512)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"embedded_docs = []\n",
"for _id, embedded_text in enumerate(embedded_data):\n",
" embedded_docs.append({\"id\": _id+1, \"vector\": embedded_text, \"text\": data[_id]})"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'insert_count': 50,\n",
" 'ids': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50],\n",
" 'cost': 0}"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"milvus_client.insert(collection_name=\"products\", data=embedded_docs)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 4. Inspect the search results"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"question = \"Give list of products from Suzuki\""
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"search_res = milvus_client.search(\n",
" collection_name=\"products\",\n",
" data=[\n",
" openai_ef.encode_documents([question])[0]\n",
" ], # Encode the question into an embedding vector with the OpenAI embedding function\n",
" limit=3, # Return top 3 results\n",
" search_params={\"metric_type\": \"COSINE\", \"params\": {}}, # Cosine similarity metric\n",
" output_fields=[\"text\"], # Return the text field\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[\n",
" [\n",
" \"Suzuki SJ 410 2023\",\n",
" 0.5219288468360901\n",
" ],\n",
" [\n",
" \"Isuzu VehiCROSS 2023\",\n",
" 0.38782158493995667\n",
" ],\n",
" [\n",
" \"Jaguar S-Type 2023\",\n",
" 0.35628464818000793\n",
" ]\n",
"]\n"
]
}
],
"source": [
"retrieved_lines_with_distances = [\n",
" (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n",
"]\n",
"print(json.dumps(retrieved_lines_with_distances, indent=4))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 5. Use OpenAI ChatGPT to get the RAG response"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's define the system and user prompts for the Language Model. This prompt is assembled with the retrieved documents from Milvus.\n",
"\n",
"We also use OpenAI ChatGPT to generate a response based on the prompts."
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"context = \"\\n\".join(\n",
" [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"SYSTEM_PROMPT = \"\"\"\n",
"Human: You are an AI assistant. You are able to find answers to the questions from the contextual passage snippets provided.\n",
"\"\"\"\n",
"USER_PROMPT = f\"\"\"\n",
"Use the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n",
"\n",
"{context}\n",
"\n",
"\n",
"{question}\n",
"\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Suzuki SJ 410 2023\n",
"\n"
]
}
],
"source": [
"response = openai_client.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
" {\"role\": \"user\", \"content\": USER_PROMPT},\n",
" ],\n",
")\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Summary\n",
"\n",
"This shows how easy it is to build a RAG pipeline in Python for quick local development, which helps us speed up our development iterations. All within the comfort of a Python environment."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "pyairbyte-hackathon",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
================================================
FILE: pyairbyte_notebooks/sentiment_analysis_airbyte_gsheets_snowflakecortex.ipynb
================================================
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"# Sentiment analysis using Airbyte Cloud, Google sheets, and Snowflake Cortex\n",
"\n"
],
"metadata": {
"id": "SK1X63WQ0xNk"
}
},
{
"cell_type": "markdown",
"source": [
"## Setup Airbyte Source as Google Sheets\n",
"\n",
"\n",
"\n",
"* Login to your airbyte cloud account and Select Google sheets as source\n",
"* Provide the service account json and Spreadsheet link as per the airbyte documentation mentioned [here](https://docs.airbyte.com/integrations/sources/google-sheets#set-up-the-google-sheets-source-connector-in-airbyte)\n",
"\n"
],
"metadata": {
"id": "WMVZ-XWi1iVy"
}
},
{
"cell_type": "markdown",
"source": [
"### Setup Source - Google sheets Airbyte Cloud Screenshot"
],
"metadata": {
"id": "NSkQlOaP0_3M"
}
},
{
"cell_type": "markdown",
"source": [
"## Setup Airbyte Destination as Snowflake Cortex\n",
"\n",
"\n",
"* Login to your airbyte cloud account and Select Snowflake Cortex as destination\n",
"* Provide the required credentials per the airbyte documentation mentioned [here](https://docs.airbyte.com/integrations/destinations/snowflake-cortex#prerequisites)\n"
],
"metadata": {
"id": "eZp_Tar02PuC"
}
},
{
"cell_type": "markdown",
"source": [
"## Setup Destination - Snowflake Cortex Airbyte Cloud Screenshot\n",
"\n",
"\n",
"\n",
"\n",
""
],
"metadata": {
"id": "Zo_W8X5C5Rmy"
}
},
{
"cell_type": "markdown",
"source": [
"## Setting up the connector\n",
"\n",
"Setup the connection between Airbyte Google Sheets source and Snowflake Cortex destination\n",
"\n",
"\n"
],
"metadata": {
"id": "0cJlfH2E586B"
}
},
{
"cell_type": "markdown",
"source": [
"## Install PyAirbyte and other dependencies"
],
"metadata": {
"id": "O2m0TjAWkcGs"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "5DC5-hZUkYSD"
},
"outputs": [],
"source": [
"!pip3 install airbyte openai langchain-openai snowflake-connector-python langchain_core"
]
},
{
"cell_type": "markdown",
"source": [
"## Query the Records from Snowflake Cortex Airbyte Destination"
],
"metadata": {
"id": "lLUe1eM8D0R4"
}
},
{
"cell_type": "code",
"source": [
"from snowflake import connector\n",
"from google.colab import userdata\n",
"from typing import List\n",
"import pandas as pd\n",
"\n",
"def get_db_connection():\n",
" return connector.connect(\n",
" account=userdata.get(\"SNOWFLAKE_HOST\"),\n",
" role=userdata.get(\"SNOWFLAKE_ROLE\"),\n",
" warehouse=userdata.get(\"SNOWFLAKE_WAREHOUSE\"),\n",
" database=userdata.get(\"SNOWFLAKE_DATABASE\"),\n",
" schema=userdata.get(\"SNOWFLAKE_SCHEMA\"),\n",
" user=userdata.get(\"SNOWFLAKE_USERNAME\"),\n",
" password=userdata.get(\"SNOWFLAKE_PASSWORD\"),\n",
" )\n",
"\n",
"def fetch_table_data(table_name, columns):\n",
" conn = get_db_connection()\n",
" cursor = conn.cursor()\n",
"\n",
" # Construct the query\n",
" columns_str = \", \".join(columns)\n",
" query = f\"SELECT {columns_str} FROM {table_name} LIMIT 5;\"\n",
"\n",
" cursor.execute(query)\n",
" result = cursor.fetchall()\n",
"\n",
" # Get column names\n",
" col_names = [desc[0] for desc in cursor.description]\n",
"\n",
" cursor.close()\n",
" conn.close()\n",
"\n",
" # Convert the result to a pandas DataFrame\n",
" df = pd.DataFrame(result, columns=col_names)\n",
" return df;\n",
"\n",
"\n",
"\n",
"data_frame = fetch_table_data(\"AMAZON_REVIEWS\", [\"DOCUMENT_ID\",\"DOCUMENT_CONTENT\"])\n",
"data_frame\n",
"\n"
],
"metadata": {
"id": "dFY3Rf6cliaU",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 206
},
"outputId": "3c8d4ca9-e2b6-4f29-a9e1-2e245e10731d"
},
"execution_count": 2,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
" DOCUMENT_ID \\\n",
"0 147706135155296294706750175152529247047 \n",
"1 310610605518759482714931752273988313361 \n",
"2 321811899600750519985540060710072527880 \n",
"3 161308189025596278679660362756706325714 \n",
"4 320500296319786972506727202798756081031 \n",
"\n",
" DOCUMENT_CONTENT \n",
"0 \"reviewerName: 0mie\\noverall: 5\\nreviewText: P... \n",
"1 \"reviewerName: 1K3\\noverall: 4\\nreviewText: it... \n",
"2 \"reviewerName: 1m2\\noverall: 5\\nreviewText: Th... \n",
"3 \"reviewerName: 2&1/2Men\\noverall: 5\\nrevie... \n",
"4 \"reviewerName: 2Cents!\\noverall: 5\\nreviewText... "
],
"text/html": [
"\n",
"
\n"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "dataframe",
"variable_name": "data_frame",
"summary": "{\n \"name\": \"data_frame\",\n \"rows\": 5,\n \"fields\": [\n {\n \"column\": \"DOCUMENT_ID\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"310610605518759482714931752273988313361\",\n \"320500296319786972506727202798756081031\",\n \"321811899600750519985540060710072527880\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"DOCUMENT_CONTENT\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"\\\"reviewerName: 1K3\\\\noverall: 4\\\\nreviewText: it works as expected. I should have sprung for the higher capacity. I think its made a bit cheesier than the earlier versions; the paint looks not as clean as before\\\\nreviewTime: 2012-12-23\\\\nday_diff: 715\\\\nhelpful_yes: 0\\\\nhelpful_no: 0\\\\ntotal_vote: 0\\\\nscore_pos_neg_diff: 0\\\\nscore_average_rating: 0\\\\nwilson_lower_bound: 0\\\"\",\n \"\\\"reviewerName: 2Cents!\\\\noverall: 5\\\\nreviewText: It's mini storage. It doesn't do anything else and it's not supposed to. I purchased it to add additional storage to my Microsoft Surface Pro tablet which only come in 64 and 128 GB. It does what it's supposed to and SanDisk has a long standing reputation that speaks for itself.\\\\nreviewTime: 2013-04-29\\\\nday_diff: 588\\\\nhelpful_yes: 0\\\\nhelpful_no: 0\\\\ntotal_vote: 0\\\\nscore_pos_neg_diff: 0\\\\nscore_average_rating: 0\\\\nwilson_lower_bound: 0\\\"\",\n \"\\\"reviewerName: 1m2\\\\noverall: 5\\\\nreviewText: This think has worked out great.Had a diff. bran 64gb card and if went south after 3 months.This one has held up pretty well since I had my S3, now on my Note3.*** update 3/21/14I've had this for a few months and have had ZERO issue's since it was transferred from my S3 to my Note3 and into a note2. 
This card is reliable and solid!Cheers!\\\\nreviewTime: 2013-11-21\\\\nday_diff: 382\\\\nhelpful_yes: 0\\\\nhelpful_no: 0\\\\ntotal_vote: 0\\\\nscore_pos_neg_diff: 0\\\\nscore_average_rating: 0\\\\nwilson_lower_bound: 0\\\"\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}"
}
},
"metadata": {},
"execution_count": 2
}
]
},
{
"cell_type": "markdown",
"source": [
"## Run Sentiment analysis from Snowflake Cortex Airbyte Destination Records"
],
"metadata": {
"id": "fqXE5ed4GqBD"
}
},
{
"cell_type": "code",
"source": [
"from snowflake import connector\n",
"from IPython.display import display, HTML\n",
"\n",
"def get_db_connection():\n",
" return connector.connect(\n",
" account=userdata.get(\"SNOWFLAKE_HOST\"),\n",
" role=userdata.get(\"SNOWFLAKE_ROLE\"),\n",
" warehouse=userdata.get(\"SNOWFLAKE_WAREHOUSE\"),\n",
" database=userdata.get(\"SNOWFLAKE_DATABASE\"),\n",
" schema=userdata.get(\"SNOWFLAKE_SCHEMA\"),\n",
" user=userdata.get(\"SNOWFLAKE_USERNAME\"),\n",
" password=userdata.get(\"SNOWFLAKE_PASSWORD\"),\n",
" )\n",
"\n",
"\n",
"def get_sentiment_analysis_from_snowflake(table_name):\n",
" conn = get_db_connection()\n",
" cur = conn.cursor()\n",
"\n",
"\n",
" query = f\"\"\"\n",
" SELECT\n",
" DOCUMENT_CONTENT,\n",
" CASE\n",
" WHEN SENTIMENT_SCORE > 0 THEN 'Positive'\n",
" WHEN SENTIMENT_SCORE < 0 THEN 'Negative'\n",
" ELSE 'Neutral'\n",
" END AS SENTIMENT\n",
" FROM (\n",
" SELECT\n",
" SNOWFLAKE.CORTEX.SENTIMENT(DOCUMENT_CONTENT) AS SENTIMENT_SCORE,\n",
" DOCUMENT_CONTENT\n",
" FROM {table_name}\n",
" LIMIT 5\n",
" );\n",
" \"\"\"\n",
" cur.execute(query)\n",
" result = cur.fetchall()\n",
" col_names = [desc[0] for desc in cur.description]\n",
"\n",
" df = pd.DataFrame(result,columns=col_names)\n",
"\n",
" return df\n",
"\n",
"\n",
"\n",
"\n",
"response = get_sentiment_analysis_from_snowflake(\"amazon_reviews\")\n",
"\n",
"display(HTML(response.to_html(index=False)))"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 345
},
"id": "yGn9pM7cr_6h",
"outputId": "d69ac85e-bcc6-457d-855e-3a12b57c91ee"
},
"execution_count": 5,
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
""
],
"text/html": [
"
\n",
" \n",
"
\n",
"
DOCUMENT_CONTENT
\n",
"
SENTIMENT
\n",
"
\n",
" \n",
" \n",
"
\n",
"
\"reviewerName: 0mie\\noverall: 5\\nreviewText: Purchased this for my device, it worked as advertised. You can never have too much phone memory, since I download a lot of stuff this was a no brainer for me.\\nreviewTime: 2013-10-25\\nday_diff: 409\\nhelpful_yes: 0\\nhelpful_no: 0\\ntotal_vote: 0\\nscore_pos_neg_diff: 0\\nscore_average_rating: 0\\nwilson_lower_bound: 0\"
\n",
"
Negative
\n",
"
\n",
"
\n",
"
\"reviewerName: 1K3\\noverall: 4\\nreviewText: it works as expected. I should have sprung for the higher capacity. I think its made a bit cheesier than the earlier versions; the paint looks not as clean as before\\nreviewTime: 2012-12-23\\nday_diff: 715\\nhelpful_yes: 0\\nhelpful_no: 0\\ntotal_vote: 0\\nscore_pos_neg_diff: 0\\nscore_average_rating: 0\\nwilson_lower_bound: 0\"
\n",
"
Negative
\n",
"
\n",
"
\n",
"
\"reviewerName: 1m2\\noverall: 5\\nreviewText: This think has worked out great.Had a diff. bran 64gb card and if went south after 3 months.This one has held up pretty well since I had my S3, now on my Note3.*** update 3/21/14I've had this for a few months and have had ZERO issue's since it was transferred from my S3 to my Note3 and into a note2. This card is reliable and solid!Cheers!\\nreviewTime: 2013-11-21\\nday_diff: 382\\nhelpful_yes: 0\\nhelpful_no: 0\\ntotal_vote: 0\\nscore_pos_neg_diff: 0\\nscore_average_rating: 0\\nwilson_lower_bound: 0\"
\n",
"
Positive
\n",
"
\n",
"
\n",
"
\"reviewerName: 2&1/2Men\\noverall: 5\\nreviewText: Bought it with Retail Packaging, arrived legit, in a orange envelope, english version not asian like the picture shows. arrived quickly, bought a 32 and 16 both retail packaging for my htc one sv and Lg Optimus, both cards in working order, probably best price you'll get for a nice sd card\\nreviewTime: 2013-07-13\\nday_diff: 513\\nhelpful_yes: 0\\nhelpful_no: 0\\ntotal_vote: 0\\nscore_pos_neg_diff: 0\\nscore_average_rating: 0\\nwilson_lower_bound: 0\"
\n",
"
Negative
\n",
"
\n",
"
\n",
"
\"reviewerName: 2Cents!\\noverall: 5\\nreviewText: It's mini storage. It doesn't do anything else and it's not supposed to. I purchased it to add additional storage to my Microsoft Surface Pro tablet which only come in 64 and 128 GB. It does what it's supposed to and SanDisk has a long standing reputation that speaks for itself.\\nreviewTime: 2013-04-29\\nday_diff: 588\\nhelpful_yes: 0\\nhelpful_no: 0\\ntotal_vote: 0\\nscore_pos_neg_diff: 0\\nscore_average_rating: 0\\nwilson_lower_bound: 0\"
\n",
"
Negative
\n",
"
\n",
" \n",
"
"
]
},
"metadata": {}
}
]
}
]
}
================================================
FILE: pyairbyte_notebooks/using_langchain_airbyte_package.ipynb
================================================
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"# **Illustrating the usage of *langchain_airbyte* package**\n",
"\n",
"The `langchain-airbyte` package integrates LangChain with Airbyte. \n",
"\n",
"It has a very powerful function `AirbyteLoader` which can be used to load data as document into langchain from any Airbyte source! \n",
"\n",
"This notebook demonstrates the usage of `langchain_airbyte` to load data from an Airbyte source (Github Repository) , store the data into a vector database, and perform a basic QnA on that data using FAISS and OpenAI embeddings.\n",
"\n"
],
"metadata": {
"id": "5eHLkb0FkdXz"
}
},
{
"cell_type": "markdown",
"source": [
"# **Prerequisite**\n",
"**1) OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://www.openai.com/).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://beta.openai.com/docs/quickstart).\n",
"\n",
"**2) Github Personal Access Token**:\n",
" - **Create a Github Account**: Sign up for an account on [Github](https://www.github.com/).\n",
" - **Generate an API Key**: Click on your profile icon->Settings->Developer Settings and generate a new API key. For detailed instructions, refer to the [Github documentation](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens).\n",
" \n",
"\n"
],
"metadata": {
"id": "xwF4ZyZp2ji0"
}
},
{
"cell_type": "markdown",
"source": [
"## **Installing Dependencies**\n",
"Let's start by installing all the required dependencies! \n",
"First of all we will create a virtual environment and then begin installing the dependencies.\n"
],
"metadata": {
"id": "sC1_LEcYoU1x"
}
},
{
"cell_type": "code",
"source": [
"# Add virtual environment support for running in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"#Installing the necessary libraries\n",
"!pip3.10 install -qU langchain-airbyte faiss-cpu langchain-community langchain-openai"
],
"metadata": {
"id": "7lwuMXN3ocjE"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## **Load Data using AirbyteLoader**\n",
"Now we use `AirbyteLoader` to fetch data from the source `source-github`. \n",
"You may use any other source you require, but fetch the data accordingly! \n",
"Don't forget to add all the required fields! \n",
"Refer the guide for your source [here](https://docs.airbyte.com/integrations/sources/)\n",
"\n",
"For more information regarding this package [refer](https://python.langchain.com/v0.2/docs/integrations/document_loaders/airbyte/)\n",
"\n",
"The last step of converting data to documents ensures that the raw data (GitHub commits) is converted into a standardized format that includes both the main content and any associated metadata."
],
"metadata": {
"id": "MdmoHC1Fo7fn"
}
},
{
"cell_type": "code",
"source": [
"from langchain_airbyte import AirbyteLoader\n",
"from langchain.schema import Document\n",
"\n",
"# Configure the AirbyteLoader to load data from a GitHub repository\n",
"loader = AirbyteLoader(\n",
" source=\"source-github\",\n",
" stream=\"commits\",\n",
" config={\n",
" \"credentials\": {\n",
" \"personal_access_token\": \"your_personal_access_token\"\n",
" },\n",
" \"repositories\": [\"your_username/repository_name\"]\n",
" }\n",
")\n",
"\n",
"# Load documents from the specified GitHub source\n",
"docs = loader.load()\n",
"\n",
"# Convert incoming stream data into documents\n",
"docs = [Document(page_content=record.page_content, metadata=record.metadata) for record in docs]\n"
],
"metadata": {
"id": "q-9PC6bzpQMp"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## **Split Documents into Chunks and Store these Chunks in Vector Store using FAISS**\n",
"Large documents are split into smaller chunks to make them easier to handle. This also helps in improving the efficiency of the retrieval process, as smaller chunks can be more relevant to specific queries. \n",
"\n",
"The chunks of documents are transformed into vectors using an embedding model (OpenAI embeddings). \n",
"These vectors are then stored in a FAISS vector store, which allows for efficient similarity search. \n",
"The vector store indexes the vectors and enables fast retrieval of similar vectors based on a query."
],
"metadata": {
"id": "61JWR7I_p7g5"
}
},
{
"cell_type": "code",
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"\n",
"splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=30)\n",
"chunked_docs = splitter.split_documents(docs)\n",
"\n",
"print(f\"Created {len(chunked_docs)} document chunks.\")\n",
"\n",
"# Store Chunks in Vector Store using FAISS\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"import os\n",
"\n",
"# Set the OpenAI API Key (make sure to set your own API key here)\n",
"os.environ['OPENAI_API_KEY'] = \"YOUR_OPENAI_API_KEY\"\n",
"\n",
"# Ensure filtered_docs is not empty\n",
"if not chunked_docs:\n",
" raise ValueError(\"No valid documents to store in the vector store.\")\n",
"\n",
"# Store document chunks in FAISS vector store\n",
"embeddings = OpenAIEmbeddings(openai_api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
"vector_store = FAISS.from_texts([doc.page_content for doc in chunked_docs], embeddings)\n",
"\n",
"print(\"Chunks successfully stored in vectorstore.\")"
],
"metadata": {
"id": "vqI6yNtGqJC7"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## **Perform QnA on Stored Data**\n",
"Finally we perform the Question And Answer here. \n",
"\n",
"When a query is made, the vector store retrieves relevant document chunks based on their vector similarity to the query.\n",
"The language model (OpenAI) then generates answers based on the retrieved chunks."
],
"metadata": {
"id": "xUuF_RUvqVZM"
}
},
{
"cell_type": "code",
"source": [
"# Step 5: Perform QnA on Stored Data\n",
"from langchain.chains.question_answering import load_qa_chain\n",
"\n",
"# Initialize the LLM (OpenAI)\n",
"llm = OpenAI(openai_api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
"\n",
"# Create a QnA chain\n",
"qa_chain = load_qa_chain(llm=llm, chain_type=\"stuff\")\n",
"\n",
"# Perform a QnA\n",
"query = \"What are the latest commits in the repository?\"\n",
"inputs = {\"question\": query, \"input_documents\": chunked_docs}\n",
"answer = qa_chain.invoke(inputs)\n",
"\n",
"print(\"QnA Result:\", answer)\n"
],
"metadata": {
"id": "tHDAkPjYqXDb"
},
"execution_count": null,
"outputs": []
}
]
}
================================================
FILE: satisfaction_analytics_zendesk_support/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: satisfaction_analytics_zendesk_support/README.md
================================================
# Customer Satisfaction Analytics Stack With Zendesk Support, Airbyte, Dbt, Dagster and BigQuery
Welcome to the "Customer Satisfaction Analytics Stack" repository! ✨ This is your go-to place to easily set up a data stack using Zendesk Support, Airbyte, Dbt, BigQuery, and Dagster. With this setup, you can pull Zendesk Support data, extract it using Airbyte, put it into BigQuery, and play around with it using dbt and Dagster.
This Quickstart is all about making things easy, getting you started quickly and showing you how smoothly all these tools can work together!
Below is a visual representation of how data flows through our integrated tools in this Quickstart. This comes from Dagster's global asset lineage view:
.svg>)
## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors with Terraform](#3-setting-up-airbyte-connectors-with-terraform)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#5-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add satisfaction_analytics_zendesk_support
```
2. **Navigate to the directory**:
```bash
cd satisfaction_analytics_zendesk_support
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your BigQuery connection. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset.
## 5. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
5. **Materialize Dagster Assets**:
In the Dagster UI, click on "Materialize all". This should trigger the full pipeline. First the Airbyte sync to extract data from Faker and load it into BigQuery, and then dbt to transform the raw data, materializing the `staging` and `marts` models.
## Next Steps
Congratulations on deploying and running the Customer Satisfaction Analytics Quistart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Dive into the datasets in BigQuery, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Automate and Monitor Your Pipelines**
- Explore more advanced Dagster configurations and setups to automate your pipelines further and set up monitoring and alerting to be informed of any issues immediately.
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
.user.yml
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/analysis/.gitkeep
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
staging:
+materialized: view
marts:
+materialized: view
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/marts/analyze_satisfaction_score_over_time.sql
================================================
-- Daily average customer satisfaction score from Zendesk satisfaction ratings.
-- Uses ref() instead of a hard-coded "transformed_data.stg_satisfaction_ratings"
-- so dbt tracks the dependency on the staging model (correct build order and
-- lineage) and resolves the dataset for whichever target is active.
-- NOTE(review): assumes `score` holds numeric values; Zendesk often reports
-- scores as strings like 'good'/'bad' — confirm against the synced data.
SELECT
  DATE_TRUNC(created_at, DAY) AS date,
  AVG(CAST(score AS FLOAT64)) AS avg_satisfaction_score
FROM
  {{ ref('stg_satisfaction_ratings') }}
WHERE
  -- Exclude tickets where no rating was submitted.
  score IS NOT NULL
GROUP BY
  date
ORDER BY
  date
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/marts/avarage_satisfaction_rating.sql
================================================
-- Overall average customer satisfaction score across all Zendesk ratings.
-- Uses ref() instead of a hard-coded "transformed_data.stg_satisfaction_ratings"
-- so dbt tracks the dependency on the staging model and resolves the dataset
-- for the active target.
-- NOTE(review): assumes `score` holds numeric values; Zendesk often reports
-- scores as strings like 'good'/'bad' — confirm against the synced data.
SELECT
  AVG(CAST(score AS FLOAT64)) AS avg_satisfaction_score
FROM
  {{ ref('stg_satisfaction_ratings') }}
WHERE
  -- Exclude tickets where no rating was submitted.
  score IS NOT NULL
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/marts/feedback_analysis_for_low_score.sql
================================================
-- Counts the stated reasons behind low satisfaction ratings (score <= 2),
-- most common first, to highlight the main drivers of customer dissatisfaction.
-- Uses ref() instead of a hard-coded "transformed_data.stg_satisfaction_ratings"
-- so dbt tracks the dependency on the staging model and resolves the dataset
-- for the active target.
-- NOTE(review): assumes `score` is castable to INT64; Zendesk often reports
-- scores as strings like 'good'/'bad' — confirm against the synced data.
SELECT
  reason,
  COUNT(*) AS count
FROM
  {{ ref('stg_satisfaction_ratings') }}
WHERE
  CAST(score AS INT64) <= 2
  -- Only ratings where the customer provided a reason.
  AND reason IS NOT NULL
GROUP BY
  reason
ORDER BY
  count DESC
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/marts/trend_analysis_by_score.sql
================================================
-- Distribution of satisfaction ratings: how many ratings fall on each score.
-- Uses ref() instead of a hard-coded "transformed_data.stg_satisfaction_ratings"
-- so dbt tracks the dependency on the staging model and resolves the dataset
-- for the active target.
SELECT
  score,
  COUNT(*) AS count
FROM
  {{ ref('stg_satisfaction_ratings') }}
WHERE
  -- Exclude tickets where no rating was submitted.
  score IS NOT NULL
GROUP BY
  score
ORDER BY
  score
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/sources/zendesk_support_sources.yml
================================================
version: 2
sources:
- name: zendesk_support
# Use your BigQuery project ID
database: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
# Use your BigQuery dataset name
schema: zendesk_airbyte_trial
tables:
- name: users
description: "Simulated user data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the user."
- name: active
- name: alias
- name: chat_only
- name: created_at
- name: custom_role_id
- name: default_group_id
- name: details
- name: email
- name: external_id
- name: updated_at
- name: iana_time_zone
- name: last_login_at
- name: locale
- name: locale_id
- name: moderator
- name: name
- name: notes
- name: only_private_comments
- name: organization_id
- name: permanently_deleted
- name: phone
- name: photo
- name: report_csv
- name: restricted_agent
- name: role
- name: role_type
- name: shared
- name: shared_agent
- name: shared_phone_number
- name: signature
- name: suspended
- name: tags
- name: ticket_restriction
- name: time_zone
- name: two_factor_auth_enabled
- name: url
- name: user_fields
- name: verified
- name: tickets
description: "Simulated tickets data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the ticket."
- name: updated_at
- name: allow_attachments
- name: allow_channelback
- name: assignee_id
- name: brand_id
- name: collaborator_ids
- name: created_at
- name: custom_fields
- name: custom_status_id
- name: description
- name: due_at
- name: email_cc_ids
- name: external_id
- name: fields
- name: follower_ids
- name: followup_ids
- name: forum_topic_id
- name: from_messaging_channel
- name: generated_timestamp
- name: group_id
- name: has_incidents
- name: is_public
- name: organization_id
- name: priority
- name: problem_id
- name: raw_subject
- name: recipient
- name: requester_id
- name: satisfaction_rating
- name: sharing_agreement_ids
- name: status
- name: subject
- name: submitter_id
- name: tags
- name: ticket_form_id
- name: type
- name: url
- name: via
- name: satisfaction_ratings
description: "Simulated satisfaction_ratings data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the satisfaction_ratings."
- name: updated_at
- name: created_at
- name: assignee_id
- name: comment
- name: group_id
- name: reason
- name: reason_id
- name: requester_id
- name: score
- name: ticket_id
- name: url
- name: brands
description: "Simulated brands data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the brands."
- name: updated_at
- name: created_at
- name: url
- name: active
- name: brand_url
- name: default
- name: has_help_center
- name: help_center_state
- name: host_mapping
- name: is_deleted
- name: logo
- name: name
- name: signature_template
- name: subdomain
- name: ticket_form_ids
- name: groups
description: "Simulated groups data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the groups."
- name: updated_at
- name: created_at
- name: url
- name: deleted
- name: description
- name: default
- name: is_public
- name: name
- name: organizations
description: "Simulated organizations data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the organizations."
- name: updated_at
- name: created_at
- name: url
- name: details
- name: domain_names
- name: external_id
- name: group_id
- name: notes
- name: organization_fields
- name: deleted_at
- name: shared_comments
- name: name
- name: shared_tickets
- name: tags
- name: tags
description: "Simulated tags data from the Zendesk Support connector."
columns:
- name: name
- name: count
- name: ticket_audits
description: "Simulated ticket_audits data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the ticket_audits."
- name: author_id
- name: created_at
- name: events
- name: metadata
- name: ticket_id
- name: via
- name: ticket_comments
description: "Simulated ticket_comments data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the ticket_comments."
- name: attachments
- name: created_at
- name: audit_id
- name: author_id
- name: body
- name: event_type
- name: html_body
- name: metadata
- name: plain_body
- name: public
- name: ticket_id
- name: timestamp
- name: type
- name: uploads
- name: via
- name: via_reference_id
- name: ticket_fields
description: "Simulated ticket_fields data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the ticket_fields."
- name: active
- name: created_at
- name: agent_description
- name: collapsed_for_agents
- name: custom_field_options
- name: custom_statuses
- name: description
- name: editable_in_portal
- name: key
- name: position
- name: raw_description
- name: raw_title
- name: raw_title_in_portal
- name: regexp_for_validation
- name: removable
- name: required
- name: required_in_portal
- name: sub_type_id
- name: system_field_options
- name: tag
- name: title
- name: title_in_portal
- name: type
- name: updated_at
- name: url
- name: visible_in_portal
- name: ticket_forms
description: "Simulated ticket_forms data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the ticket_forms."
- name: active
- name: created_at
- name: agent_conditions
- name: default
- name: display_name
- name: end_user_conditions
- name: end_user_visible
- name: in_all_brands
- name: name
- name: position
- name: raw_display_name
- name: raw_name
- name: restricted_brand_ids
- name: ticket_field_ids
- name: updated_at
- name: url
- name: ticket_metric_events
description: "Simulated ticket_metric_events data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the ticket_metric_events."
- name: instance_id
- name: metric
- name: time
- name: type
- name: ticket_id
- name: ticket_metrics
description: "Simulated ticket_metrics data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the ticket_metrics."
- name: agent_wait_time_in_minutes
- name: created_at
- name: assigned_at
- name: assignee_stations
- name: assignee_updated_at
- name: custom_status_updated_at
- name: first_resolution_time_in_minutes
- name: full_resolution_time_in_minutes
- name: group_stations
- name: initially_assigned_at
- name: instance_id
- name: latest_comment_added_at
- name: metric
- name: on_hold_time_in_minutes
- name: updated_at
- name: url
- name: reopens
- name: replies
- name: reply_time_in_minutes
- name: requester_updated_at
- name: requester_wait_time_in_minutes
- name: solved_at
- name: status
- name: status_updated_at
- name: ticket_id
- name: time
- name: type
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_brands.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `brands` table
-- loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'brands') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_groups.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `groups` table
-- loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'groups') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_organizations.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `organizations`
-- table loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'organizations') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_satisfaction_ratings.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support
-- `satisfaction_ratings` table loaded by Airbyte; the marts in this project
-- (score trends, low-score feedback, averages) build on this model.
select
    *
from {{ source('zendesk_support', 'satisfaction_ratings') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_tags.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `tags` table
-- loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'tags') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_ticket_audits.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `ticket_audits`
-- table loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'ticket_audits') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_ticket_comments.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `ticket_comments`
-- table loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'ticket_comments') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_ticket_fields.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `ticket_fields`
-- table loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'ticket_fields') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_ticket_forms.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `ticket_forms`
-- table loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'ticket_forms') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_ticket_metric_events.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support
-- `ticket_metric_events` table loaded by Airbyte; downstream marts should
-- select from this model.
select
    *
from {{ source('zendesk_support', 'ticket_metric_events') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_ticket_metrics.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `ticket_metrics`
-- table loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'ticket_metrics') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_tickets.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `tickets` table
-- loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'tickets') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/models/staging/stg_users.sql
================================================
-- Staging model: 1:1 passthrough of the raw Zendesk Support `users` table
-- loaded by Airbyte; downstream marts should select from this model.
select
    *
from {{ source('zendesk_support', 'users') }}
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/profiles.yml
================================================
# dbt connection profile for this quickstart's BigQuery target.
# Credentials are injected via environment variables so nothing secret
# lives in version control.
dbt_project:
  outputs:
    dev:
      # Dataset where dbt materializes the transformed models.
      dataset: transformed_data
      job_execution_timeout_seconds: 300
      job_retries: 1
      # Use an env variable to indicate your JSON key file path
      keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
      location: US
      method: service-account
      priority: interactive
      # Indicate your BigQuery project ID
      project: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
      threads: 1
      type: bigquery
  target: dev
================================================
FILE: satisfaction_analytics_zendesk_support/dbt_project/tests/.gitkeep
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: satisfaction_analytics_zendesk_support/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:0LmuAc5LvlMuOUPtNEaCAh9FHrV/C877bDJhm9Lz8MU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: satisfaction_analytics_zendesk_support/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Zendesk Support source, authenticated with an API token + email pair.
// The token, email, and workspace id come from variables (see variables.tf).
resource "airbyte_source_zendesk_support" "my_source_zendesksupport" {
  configuration = {
    source_type = "zendesk-support"
    credentials = {
      source_zendesk_support_authentication_api_token = {
        credentials = "api_token"
        api_token   = var.api_token
        email       = var.email
      }
    }
    // Only records created/updated after this timestamp are synced.
    start_date = "2020-10-15T00:00:00Z"
    // NOTE(review): subdomain is hard-coded to a specific Zendesk account;
    // replace with your own subdomain (or promote to a variable).
    subdomain = "self3836"
  }
  name         = "Zendesk Support"
  workspace_id = var.workspace_id
}
// BigQuery destination using standard (batch) inserts into the raw dataset.
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.dataset_id
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    credentials_json = var.credentials_json
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}
// Connection wiring the source to the destination; only the listed streams
// are synced (these match the staging models in the dbt project).
resource "airbyte_connection" "zendesk_support_bigquery" {
  name           = "Zendesk Support to BigQuery"
  source_id      = airbyte_source_zendesk_support.my_source_zendesksupport.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [
      {
        name = "users"
      },
      {
        name = "tickets"
      },
      {
        name = "satisfaction_ratings"
      },
      {
        name = "ticket_metrics"
      },
      {
        name = "ticket_metric_events"
      },
      {
        name = "ticket_comments"
      },
      {
        name = "ticket_audits"
      }
    ]
  }
}
================================================
FILE: satisfaction_analytics_zendesk_support/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte Terraform provider to the version recorded in the lock file.
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.3"
    }
  }
}
provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below).
  // NOTE(review): these are the Airbyte OSS defaults; prefer supplying real
  // credentials via TF_VAR_* environment variables rather than editing this file.
  username = "airbyte"
  password = "password"
  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1/"
}
================================================
FILE: satisfaction_analytics_zendesk_support/infra/airbyte/variables.tf
================================================
// Input variables for the Airbyte source/destination/connection resources.
// Secret-bearing variables are marked `sensitive` so Terraform redacts them
// from plan/apply output and state diffs shown in the CLI.

// Zendesk Support API token used for authentication.
variable "api_token" {
  type      = string
  sensitive = true
}
// Email address associated with the Zendesk API token.
variable "email" {
  type = string
}
// Airbyte workspace that the source/destination/connection are created in.
variable "workspace_id" {
  type = string
}
// BigQuery dataset that Airbyte loads raw data into (e.g. "raw_data").
variable "dataset_id" {
  type = string
}
// Google Cloud project ID that hosts the BigQuery datasets.
variable "project_id" {
  type = string
}
// Contents of the service-account JSON key used by the BigQuery destination.
variable "credentials_json" {
  type      = string
  sensitive = true
}
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/orchestration/__init__.py
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path
@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize every model in the dbt project by running ``dbt build``.

    Streams the events of the CLI invocation back to Dagster so each dbt
    model is reported as its own asset materialization.
    """
    invocation = dbt.cli(["build"], context=context)
    for event in invocation.stream():
        yield event
# Connection to the local Airbyte OSS deployment (see the README's
# orchestration section for the expected environment variables).
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password. The password is
    # read from the AIRBYTE_PASSWORD environment variable — as the README
    # instructs (`export AIRBYTE_PASSWORD=...`) — instead of being
    # hard-coded; "password" is the Airbyte OSS default.
    username="airbyte",
    password=os.getenv("AIRBYTE_PASSWORD", "password"),
)

# Expose every connection in the Airbyte workspace as Dagster assets.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance)
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource
# Absolute path to the dbt project that ships with this quickstart
# (../../dbt_project relative to this package directory).
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
# dbt CLI resource used below only to generate a manifest at load time.
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))
# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # Run `dbt parse` to produce a fresh manifest.json in the invocation's
    # target directory. NOTE(review): `manifest={}` is the dagster-dbt idiom
    # for "no pre-existing manifest" — confirm against the pinned
    # dagster-dbt version if upgrading.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    # Fall back to a manifest produced by a prior `dbt build`/`dbt parse`.
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource
from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules
# Assemble the Dagster code location: dbt + Airbyte assets, schedules,
# and the shared dbt CLI resource.
_all_assets = [dbt_project_dbt_assets, airbyte_assets]

_resources = {
    # dbt CLI resource pointed at the quickstart's dbt project directory.
    "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
}

defs = Definitions(
    assets=_all_assets,
    schedules=schedules,
    resources=_resources,
)
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
# Schedules for this code location. Empty by default; the commented-out
# entry materializes all dbt assets daily at midnight when uncommented.
schedules = [
    # build_schedule_from_dbt_selection(
    # [dbt_project_dbt_assets],
    # job_name="materialize_dbt_models",
    # cron_schedule="0 0 * * *",
    # dbt_select="fqn:*",
    # ),
]
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/setup.py
================================================
from setuptools import find_packages, setup
# Package definition for the Dagster orchestration code location.
setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        # Required by orchestration/assets.py (AirbyteResource,
        # load_assets_from_airbyte_instance); was missing here even though
        # the repo's top-level setup.py already lists it.
        "dagster-airbyte",
        "dbt-core>=1.4.0",
        "dbt-bigquery",
    ],
    extras_require={
        # `pip install -e ".[dev]"` adds the local Dagster webserver.
        "dev": [
            "dagster-webserver",
        ]
    },
)
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/ad21fadd-c131-4a7c-98a7-fa5ad3a929de/compute_logs/mdvhnoik.complete
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/ad21fadd-c131-4a7c-98a7-fa5ad3a929de/compute_logs/mdvhnoik.err
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/ad21fadd-c131-4a7c-98a7-fa5ad3a929de/compute_logs/mdvhnoik.out
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/ad21fadd-c131-4a7c-98a7-fa5ad3a929de/compute_logs/uzgmeijp.complete
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/ad21fadd-c131-4a7c-98a7-fa5ad3a929de/compute_logs/uzgmeijp.err
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/ad21fadd-c131-4a7c-98a7-fa5ad3a929de/compute_logs/uzgmeijp.out
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/f7507115-918d-443f-ab91-a065e84fa403/compute_logs/aeebjmfa.complete
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/f7507115-918d-443f-ab91-a065e84fa403/compute_logs/aeebjmfa.err
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/f7507115-918d-443f-ab91-a065e84fa403/compute_logs/aeebjmfa.out
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/f7507115-918d-443f-ab91-a065e84fa403/compute_logs/zqbkkiww.complete
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/f7507115-918d-443f-ab91-a065e84fa403/compute_logs/zqbkkiww.err
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmp3ks7pwhz/storage/f7507115-918d-443f-ab91-a065e84fa403/compute_logs/zqbkkiww.out
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/0bc4e544-546d-44df-b79c-e75413c56ecb/compute_logs/xozgecli.complete
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/0bc4e544-546d-44df-b79c-e75413c56ecb/compute_logs/xozgecli.err
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/0bc4e544-546d-44df-b79c-e75413c56ecb/compute_logs/xozgecli.out
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/0bc4e544-546d-44df-b79c-e75413c56ecb/compute_logs/yyxjctam.complete
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/0bc4e544-546d-44df-b79c-e75413c56ecb/compute_logs/yyxjctam.err
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/0bc4e544-546d-44df-b79c-e75413c56ecb/compute_logs/yyxjctam.out
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/1eac78ed-12d1-4147-9c48-79b27dd586ed/compute_logs/iqvvuhde.complete
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/1eac78ed-12d1-4147-9c48-79b27dd586ed/compute_logs/iqvvuhde.err
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/1eac78ed-12d1-4147-9c48-79b27dd586ed/compute_logs/iqvvuhde.out
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/1eac78ed-12d1-4147-9c48-79b27dd586ed/compute_logs/izklbfmq.complete
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/1eac78ed-12d1-4147-9c48-79b27dd586ed/compute_logs/izklbfmq.err
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/orchestration/tmpb3ctnsbk/storage/1eac78ed-12d1-4147-9c48-79b27dd586ed/compute_logs/izklbfmq.out
================================================
================================================
FILE: satisfaction_analytics_zendesk_support/setup.py
================================================
from setuptools import find_packages, setup
# Package definition for the Airbyte + dbt + Dagster quickstart.
_requirements = [
    "dbt-bigquery",
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dagster-airbyte",
]

setup(
    name="airbyte-dbt-dagster",
    packages=find_packages(),
    install_requires=_requirements,
    # Development extras: local Dagster UI and the test runner.
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: shopping_cart_analytics_shopify/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: shopping_cart_analytics_shopify/README.md
================================================
# Shopping Cart Analytics Stack With Shopify, Airbyte, dbt, Dagster and BigQuery
Welcome to the "Shopping Cart Analytics Stack" repository! ✨ This is your go-to place to easily set up a data stack using Shopify, Airbyte, dbt, BigQuery, and Dagster. With this setup, you can pull Shopify data, extract it using Airbyte, put it into BigQuery, and play around with it using dbt and Dagster.
This Quickstart is all about making things easy, getting you started quickly and showing you how smoothly all these tools can work together!
Below is a visual representation of how data flows through our integrated tools in this Quickstart. This comes from Dagster's global asset lineage view:
![Global asset lineage view](assets/global_asset_lineage.svg)
## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors with Terraform](#3-setting-up-airbyte-connectors-with-terraform)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#5-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add shopping_cart_analytics_shopify
```
2. **Navigate to the directory**:
```bash
cd shopping_cart_analytics_shopify
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your BigQuery connection. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset.
## 5. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
5. **Materialize Dagster Assets**:
In the Dagster UI, click on "Materialize all". This should trigger the full pipeline. First the Airbyte sync to extract data from Shopify and load it into BigQuery, and then dbt to transform the raw data, materializing the `staging` and `marts` models.
## Next Steps
Congratulations on deploying and running the Shopping Cart Analytics Quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Dive into the datasets in BigQuery, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Automate and Monitor Your Pipelines**
- Explore more advanced Dagster configurations and setups to automate your pipelines further and set up monitoring and alerting to be informed of any issues immediately.
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
.user.yml
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
staging:
+materialized: view
marts:
+materialized: view
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/models/marts/abandoned_checkout_ratio.sql
================================================
-- Overall checkout abandonment: a checkout with no completed_at timestamp
-- was never finished. Reports raw counts plus the abandonment rate.
SELECT
  COUNTIF(completed_at IS NULL) AS abandoned_checkouts,
  COUNT(*) AS total_checkouts,
  -- SAFE_DIVIDE returns NULL instead of erroring when the table is empty.
  SAFE_DIVIDE(COUNTIF(completed_at IS NULL), COUNT(*)) AS abandonment_rate
FROM
  -- Use ref() (not a hardcoded dataset.table) so dbt tracks lineage and the
  -- model resolves correctly in any target dataset.
  {{ ref('stg_abandoned_checkouts') }}
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/models/marts/location_based_abandoned_checkouts.sql
================================================
-- Abandoned checkouts grouped by country, most-affected countries first.
WITH abandoned_checkouts AS (
  SELECT
    *,
    -- Prefer the billing-address country; fall back to the shipping address
    -- when no billing address was captured.
    IF(billing_address IS NOT NULL, JSON_EXTRACT_SCALAR(billing_address, '$.country'), JSON_EXTRACT_SCALAR(shipping_address, '$.country')) AS checkout_country
  FROM
    -- ref() keeps dbt lineage intact instead of hardcoding the dataset name.
    {{ ref('stg_abandoned_checkouts') }}
)
SELECT
  checkout_country,
  COUNTIF(completed_at IS NULL) AS abandoned_checkouts
FROM
  abandoned_checkouts
GROUP BY
  checkout_country
ORDER BY
  abandoned_checkouts DESC
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/models/marts/most_abandoned_products.sql
================================================
-- Products that appear most often in abandoned checkouts.
SELECT
  -- BUGFIX: extract the title from each unnested line item (`items`), not
  -- from the raw `line_items` JSON array — '$.title' on the array itself
  -- always yields NULL, collapsing every product into one group.
  JSON_EXTRACT_SCALAR(items, '$.title') AS product_title,
  COUNTIF(completed_at IS NULL) AS abandoned_checkouts
FROM
  -- ref() keeps dbt lineage intact instead of hardcoding the dataset name.
  {{ ref('stg_abandoned_checkouts') }}
CROSS JOIN UNNEST(JSON_EXTRACT_ARRAY(line_items)) AS items
GROUP BY
  product_title
ORDER BY
  abandoned_checkouts DESC
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/models/marts/time_based.sql
================================================
-- When do abandoned checkouts happen? Buckets by day of week (1 = Sunday in
-- BigQuery) and hour of day, based on when the checkout was created.
SELECT
  EXTRACT(DAYOFWEEK FROM created_at) AS day_of_week,
  EXTRACT(HOUR FROM created_at) AS hour_of_day,
  COUNTIF(completed_at IS NULL) AS abandoned_checkouts
FROM
  -- ref() keeps dbt lineage intact instead of hardcoding the dataset name.
  {{ ref('stg_abandoned_checkouts') }}
GROUP BY
  day_of_week,
  hour_of_day
ORDER BY
  day_of_week, hour_of_day
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/models/sources/shopify_source.yml
================================================
version: 2
sources:
- name: shopify
# Use your BigQuery project ID
database: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
# Use your BigQuery dataset name
schema: shopify_airbyte
tables:
- name: abandoned_checkouts
description: "Simulated abandoned_checkouts data from the Shopify connector."
columns:
- name: id
description: "Unique identifier for the abandoned_checkouts."
- name: abandoned_checkout_url
- name: billing_address
- name: buyer_accepts_marketing
- name: cart_token
- name: closed_at
- name: completed_at
- name: created_at
- name: currency
- name: customer
- name: customer_locale
- name: device_id
- name: discount_codes
- name: email
- name: gateway
- name: landing_site
- name: line_items
- name: location_id
- name: name
- name: note
- name: note_attributes
- name: phone
- name: presentment_currency
- name: referring_site
- name: shipping_address
- name: shipping_lines
- name: shop_url
- name: source
- name: source_identifier
- name: source_name
- name: source_url
- name: subtotal_price
- name: tax_lines
- name: taxes_included
- name: token
- name: total_discounts
- name: total_line_items_price
- name: total_price
- name: total_tax
- name: total_weight
- name: updated_at
- name: user_id
- name: customers
description: "Simulated customers data from the Shopify connector."
columns:
- name: id
description: "Unique identifier for the customers."
- name: accepts_marketing
- name: accepts_marketing_updated_at
- name: addresses
- name: admin_graphql_api_id
- name: created_at
- name: currency
- name: default_address
- name: email
- name: email_marketing_consent
- name: first_name
- name: last_name
- name: last_order_id
- name: last_order_name
- name: marketing_opt_in_level
- name: multipass_identifier
- name: note
- name: orders_count
- name: phone
- name: shop_url
- name: sms_marketing_consent
- name: state
- name: tags
- name: tax_exempt
- name: tax_exemptions
- name: total_spent
- name: updated_at
- name: verified_email
- name: transactions
description: "Simulated transactions data from the Shopify connector."
columns:
- name: id
description: "Unique identifier for the transactions."
- name: admin_graphql_api_id
- name: amount
- name: authorization
- name: created_at
- name: currency
- name: device_id
- name: error_code
- name: gateway
- name: kind
- name: location_id
- name: message
- name: order_id
- name: parent_id
- name: payment_details
- name: payment_id
- name: processed_at
- name: receipt
- name: shop_url
- name: source_name
- name: status
- name: test
- name: total_unsettled_set
- name: user_id
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/models/staging/stg_abandoned_checkouts.sql
================================================
-- Staging model: passes the raw Shopify `abandoned_checkouts` stream through
-- unchanged so downstream marts depend on a single dbt-managed relation
-- rather than the raw Airbyte landing table.
select
    *
from {{ source('shopify', 'abandoned_checkouts') }}
================================================
FILE: shopping_cart_analytics_shopify/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
dataset: transformed_data
job_execution_timeout_seconds: 300
job_retries: 1
# Use an env variable to indicate your JSON key file path
keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
location: US
method: service-account
priority: interactive
# Indicate your BigQuery project ID
project: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
threads: 1
type: bigquery
target: dev
================================================
FILE: shopping_cart_analytics_shopify/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: shopping_cart_analytics_shopify/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:0LmuAc5LvlMuOUPtNEaCAh9FHrV/C877bDJhm9Lz8MU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: shopping_cart_analytics_shopify/infra/airbyte/main.tf
================================================
// Source: Shopify connector, authenticated with an Admin API password.
// `shop` is the store's subdomain (the part before .myshopify.com).
resource "airbyte_source_shopify" "my_source_shopify" {
  configuration = {
    credentials = {
      source_shopify_shopify_authorization_method_api_password = {
        api_password = var.api_password
        auth_method  = "api_password"
      }
    }
    shop        = var.shop
    source_type = "shopify"
  }
  name         = "Shopify"
  workspace_id = var.workspace_id
}

// Destinations: BigQuery target dataset for the raw Airbyte data.
// Standard inserts load directly into BigQuery (no GCS staging bucket needed).
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.dataset_id
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    credentials_json = var.credentials_json
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}

// Connections: wires the Shopify source to the BigQuery destination and
// enables only the three streams the dbt models consume.
resource "airbyte_connection" "shopify_bigquery" {
  name           = "Shopify to BigQuery"
  source_id      = airbyte_source_shopify.my_source_shopify.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [
      {
        name = "customers"
      },
      {
        name = "transactions"
      },
      {
        name = "abandoned_checkouts"
      }
    ]
  }
}
================================================
FILE: shopping_cart_analytics_shopify/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      // Pinned exactly; bumping may change the resource schema used in main.tf.
      version = "0.3.3"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = "password"

  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1/"
}
================================================
FILE: shopping_cart_analytics_shopify/infra/airbyte/variables.tf
================================================
// Input variables for the Airbyte source/destination/connection resources.
// Secrets are marked `sensitive` so Terraform redacts them from plan output
// and logs. Values are supplied via terraform.tfvars (gitignored).

variable "api_password" {
  type        = string
  description = "Shopify Admin API password used by the Airbyte Shopify source."
  sensitive   = true
}

variable "workspace_id" {
  type        = string
  description = "Airbyte workspace ID the resources are created in."
}

variable "dataset_id" {
  type        = string
  description = "BigQuery dataset Airbyte loads raw data into (e.g. raw_data)."
}

variable "project_id" {
  type        = string
  description = "Google Cloud project ID that hosts the BigQuery datasets."
}

variable "credentials_json" {
  type        = string
  description = "Service-account JSON key used by the BigQuery destination."
  sensitive   = true
}

variable "shop" {
  type        = string
  description = "Shopify store subdomain (the part before .myshopify.com)."
}
================================================
FILE: shopping_cart_analytics_shopify/orchestration/orchestration/__init__.py
================================================
================================================
FILE: shopping_cart_analytics_shopify/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path
@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize the dbt project's models by invoking `dbt build`.

    Streams events from the dbt CLI invocation so Dagster reports each
    model's materialization (and test results) as it completes.
    """
    yield from dbt.cli(["build"], context=context).stream()
# Connection details for the local Airbyte OSS deployment.
# BUGFIX: the password was hardcoded to "password" even though the README
# instructs users to `export AIRBYTE_PASSWORD`; read it from the environment
# (falling back to the local-deployment default) so that instruction works.
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # If using basic auth, include username and password:
    username="airbyte",
    password=os.getenv("AIRBYTE_PASSWORD", "password"),
)

# One Dagster asset per enabled stream of the Airbyte connection, namespaced
# under "shopify" so the asset keys line up with the dbt sources.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance, key_prefix="shopify")
================================================
FILE: shopping_cart_analytics_shopify/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path
from dagster_dbt import DbtCliResource

# Absolute path to the dbt project that ships alongside this orchestration
# package (orchestration/orchestration/constants.py -> ../../../dbt_project).
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
# CLI resource used both here (to parse the project) and by the dbt assets.
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # `dbt parse` writes a fresh manifest.json; .wait() blocks until it's done.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    # Assumes the user has already run `dbt parse`/`dbt compile` so that
    # target/manifest.json exists.
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: shopping_cart_analytics_shopify/orchestration/orchestration/definitions.py
================================================
import os
from dagster import Definitions
from dagster_dbt import DbtCliResource
from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# Dagster code location: registers the Airbyte-sourced assets, the dbt assets,
# any schedules, and the dbt CLI resource the dbt assets execute against.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: shopping_cart_analytics_shopify/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets

# No schedules are enabled by default; the commented example below runs all
# dbt models ("fqn:*") every day at midnight.
schedules = [
    # build_schedule_from_dbt_selection(
    #     [dbt_project_dbt_assets],
    #     job_name="materialize_dbt_models",
    #     cron_schedule="0 0 * * *",
    #     dbt_select="fqn:*",
    # ),
]
================================================
FILE: shopping_cart_analytics_shopify/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: shopping_cart_analytics_shopify/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Packaging for the Dagster orchestration code location. Install with
# `pip install -e ".[dev]"` to also get the local Dagster webserver.
setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dbt-core>=1.4.0",
        "dbt-bigquery",
    ],
    extras_require={
        # Dev-only: the local Dagster UI started by `dagster dev`.
        "dev": [
            "dagster-webserver",
        ]
    },
)
================================================
FILE: shopping_cart_analytics_shopify/setup.py
================================================
from setuptools import find_packages, setup

# Top-level packaging for the quickstart. Install with `pip install -e ".[dev]"`
# to pull in the Airbyte/dbt/Dagster integration libraries plus dev tooling.
setup(
    name="airbyte-dbt-dagster",
    # Consistency fix: the sibling orchestration/setup.py declares a version;
    # without one setuptools falls back to 0.0.0.
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "dbt-bigquery",
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dagster-airbyte",
    ],
    # Dev-only: dagit (the Dagster UI) and pytest for local work.
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: ticket_volume_analytics_zendesk_support/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: ticket_volume_analytics_zendesk_support/README.md
================================================
# Customer Ticket Volume Analytics Stack With Zendesk Support, Airbyte, dbt, Dagster and BigQuery
Welcome to the "Ticket Volume Analytics Stack" repository! ✨ This is your go-to place to easily set up a data stack using Zendesk Support, Airbyte, dbt, BigQuery, and Dagster. With this setup, you can pull Zendesk Support data, extract it using Airbyte, put it into BigQuery, and play around with it using dbt and Dagster.
This Quickstart is all about making things easy, getting you started quickly and showing you how smoothly all these tools can work together!
Below is a visual representation of how data flows through our integrated tools in this Quickstart. This comes from Dagster's global asset lineage view:
.svg>)
## Table of Contents
- [Prerequisites](#prerequisites)
- [Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [Setting Up BigQuery to work with Airbyte and dbt](#2-setting-up-bigquery)
- [Setting Up Airbyte Connectors with Terraform](#3-setting-up-airbyte-connectors-with-terraform)
- [Setting Up the dbt Project](#4-setting-up-the-dbt-project)
- [Orchestrating with Dagster](#5-orchestrating-with-dagster)
- [Next Steps](#next-steps)
## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli).
5. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery).
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add ticket_volume_analytics_zendesk_support
```
2. **Navigate to the directory**:
```bash
cd ticket_volume_analytics_zendesk_support
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 3. Setting Up Airbyte Connectors with Terraform
Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're harnessing the power of Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up:
1. **Navigate to the Airbyte Configuration Directory**:
Change to the relevant directory containing the Terraform configuration for Airbyte:
```bash
cd infra/airbyte
```
2. **Modify Configuration Files**:
Within the `infra/airbyte` directory, you'll find three crucial Terraform files:
- `provider.tf`: Defines the Airbyte provider.
- `main.tf`: Contains the main configuration for creating Airbyte resources.
- `variables.tf`: Holds various variables, including credentials.
Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your BigQuery connection. You can utilize the `variables.tf` file to manage these credentials.
3. **Initialize Terraform**:
This step prepares Terraform to create the resources defined in your configuration files.
```bash
terraform init
```
4. **Review the Plan**:
Before applying any changes, review the plan to understand what Terraform will do.
```bash
terraform plan
```
5. **Apply Configuration**:
After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources.
```bash
terraform apply
```
6. **Verify in Airbyte UI**:
Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go.
## 4. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset.
## 5. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools.
5. **Materialize Dagster Assets**:
In the Dagster UI, click on "Materialize all". This should trigger the full pipeline. First the Airbyte sync to extract data from Faker and load it into BigQuery, and then dbt to transform the raw data, materializing the `staging` and `marts` models.
## Next Steps
Congratulations on deploying and running the Customer Satisfaction Analytics Quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Dive into the datasets in BigQuery, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Automate and Monitor Your Pipelines**
- Explore more advanced Dagster configurations and setups to automate your pipelines further and set up monitoring and alerting to be informed of any issues immediately.
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
.user.yml
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this example config, we tell dbt to build all models in the example/
# directory as views. These settings can be overridden in the individual model
# files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
staging:
+materialized: view
marts:
+materialized: view
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/marts/busier_day_of_week_analysis.sql
================================================
-- Ticket volume by day of week (BigQuery DAYOFWEEK: 1 = Sunday .. 7 = Saturday),
-- used to identify the busiest support days.
-- Casing normalized to lowercase to match the sibling mart models.
with ticket_day_of_week_counts as (
    select
        extract(dayofweek from created_at) as day_of_week,
        count(*) as ticket_count
    from {{ ref('stg_tickets') }}
    group by 1
)

select * from ticket_day_of_week_counts
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/marts/pattern_and_trend_analysis.sql
================================================
-- Intraday ticket patterns: counts tickets by the hour of day they were created.
with ticket_hourly_counts as (
    select
        extract(hour from created_at) as hour_of_day,
        count(*) as ticket_count
    from {{ ref('stg_tickets') }}
    group by 1
)

select * from ticket_hourly_counts
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/marts/seasonal_analysis.sql
================================================
-- Seasonal ticket patterns: counts tickets by the calendar month they were created.
with ticket_seasonal_counts as (
    select
        extract(month from created_at) as month,
        count(*) as ticket_count
    from {{ ref('stg_tickets') }}
    group by 1
)

select * from ticket_seasonal_counts
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/marts/ticket_priority_analysis.sql
================================================
-- Ticket distribution by priority level.
with ticket_priority_counts as (
    select
        priority,
        count(*) as ticket_count
    from {{ ref('stg_tickets') }}
    group by 1
)

select * from ticket_priority_counts
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/marts/ticket_resolution_time_analysis.sql
================================================
-- Distribution of ticket resolution times: buckets solved tickets by the
-- number of hours between creation and resolution.
with ticket_resolution_times as (
    select
        TIMESTAMP_DIFF(solved_at, created_at, HOUR) as resolution_time,
        count(*) as ticket_count
    -- Use ref() instead of the hard-coded `transformed_data.stg_ticket_metrics`
    -- so dbt tracks lineage and resolves the dataset from the active target.
    from {{ ref('stg_ticket_metrics') }}
    where solved_at is not null
    group by 1
)

select * from ticket_resolution_times
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/marts/ticket_source_analysis.sql
================================================
-- Ticket counts by submission channel, extracted from the JSON `via` column.
with source_data as (
    select
        JSON_EXTRACT_SCALAR(json_data, '$.channel') as ticket_source,
        count(*) as ticket_count
    from (
        select
            JSON_EXTRACT_ARRAY(via)[OFFSET(0)] as json_data
        -- Use ref() instead of the hard-coded `transformed_data.stg_tickets`
        -- so dbt tracks lineage and resolves the dataset from the active target.
        from {{ ref('stg_tickets') }}
    ) t
    group by 1
)

select * from source_data
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/marts/ticket_volume_analysis.sql
================================================
-- Daily ticket volume: counts tickets by the calendar date they were created.
with ticket_counts as (
    select
        date(created_at) as ticket_date,
        count(*) as ticket_count
    from {{ ref('stg_tickets') }}
    group by 1
)

select * from ticket_counts
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/sources/zendesk_support_sources.yml
================================================
version: 2
sources:
- name: zendesk_support
# Use your BigQuery project ID
database: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
# Use your BigQuery dataset name
schema: zendesk_airbyte_trial
tables:
- name: users
description: "Simulated user data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the user."
- name: active
- name: alias
- name: chat_only
- name: created_at
- name: custom_role_id
- name: default_group_id
- name: details
- name: email
- name: external_id
- name: updated_at
- name: iana_time_zone
- name: last_login_at
- name: locale
- name: locale_id
- name: moderator
- name: name
- name: notes
- name: only_private_comments
- name: organization_id
- name: permanently_deleted
- name: phone
- name: photo
- name: report_csv
- name: restricted_agent
- name: role
- name: role_type
- name: shared
- name: shared_agent
- name: shared_phone_number
- name: signature
- name: suspended
- name: tags
- name: ticket_restriction
- name: time_zone
- name: two_factor_auth_enabled
- name: url
- name: user_fields
- name: verified
- name: tickets
description: "Simulated tickets data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the ticket."
- name: updated_at
- name: allow_attachments
- name: allow_channelback
- name: assignee_id
- name: brand_id
- name: collaborator_ids
- name: created_at
- name: custom_fields
- name: custom_status_id
- name: description
- name: due_at
- name: email_cc_ids
- name: external_id
- name: fields
- name: follower_ids
- name: followup_ids
- name: forum_topic_id
- name: from_messaging_channel
- name: generated_timestamp
- name: group_id
- name: has_incidents
- name: is_public
- name: organization_id
- name: priority
- name: problem_id
- name: raw_subject
- name: recipient
- name: requester_id
- name: satisfaction_rating
- name: sharing_agreement_ids
- name: status
- name: subject
- name: submitter_id
- name: tags
- name: ticket_form_id
- name: type
- name: url
- name: via
- name: schedules
description: "Simulated schedules data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the schedule."
- name: created_at
- name: intervals
- name: name
- name: time_zone
- name: updated_at
- name: ticket_metrics
description: "Simulated ticket_metrics data from the Zendesk Support connector."
columns:
- name: id
description: "Unique identifier for the ticket_metrics."
- name: agent_wait_time_in_minutes
- name: created_at
- name: assigned_at
- name: assignee_stations
- name: assignee_updated_at
- name: custom_status_updated_at
- name: first_resolution_time_in_minutes
- name: full_resolution_time_in_minutes
- name: group_stations
- name: initially_assigned_at
- name: instance_id
- name: latest_comment_added_at
- name: metric
- name: on_hold_time_in_minutes
- name: updated_at
- name: url
- name: reopens
- name: replies
- name: reply_time_in_minutes
- name: requester_updated_at
- name: requester_wait_time_in_minutes
- name: solved_at
- name: status
- name: status_updated_at
- name: ticket_id
- name: time
- name: type
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/staging/stg_schedules.sql
================================================
-- Staging model: direct passthrough of the raw Zendesk `schedules` source table.
select
    *
from {{ source('zendesk_support', 'schedules') }}
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/staging/stg_ticket_metrics.sql
================================================
-- Staging model: direct passthrough of the raw Zendesk `ticket_metrics` source table.
select
    *
from {{ source('zendesk_support', 'ticket_metrics') }}
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/staging/stg_tickets.sql
================================================
-- Staging model: direct passthrough of the raw Zendesk `tickets` source table.
select
    *
from {{ source('zendesk_support', 'tickets') }}
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/models/staging/stg_users.sql
================================================
-- Staging model: direct passthrough of the raw Zendesk `users` source table.
select
    *
from {{ source('zendesk_support', 'users') }}
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/profiles.yml
================================================
dbt_project:
outputs:
dev:
dataset: transformed_data
job_execution_timeout_seconds: 300
job_retries: 1
# Use an env variable to indicate your JSON key file path
keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
location: US
method: service-account
priority: interactive
# Indicate your BigQuery project ID
project: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
threads: 1
type: bigquery
target: dev
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: ticket_volume_analytics_zendesk_support/dbt_project/tests/.gitkeep
================================================
================================================
FILE: ticket_volume_analytics_zendesk_support/infra/.gitignore
================================================
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
================================================
FILE: ticket_volume_analytics_zendesk_support/infra/airbyte/.terraform.lock.hcl
================================================
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/airbytehq/airbyte" {
version = "0.3.3"
constraints = "0.3.3"
hashes = [
"h1:0LmuAc5LvlMuOUPtNEaCAh9FHrV/C877bDJhm9Lz8MU=",
"zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6",
"zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004",
"zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6",
"zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4",
"zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1",
"zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1",
"zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f",
"zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90",
"zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0",
"zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72",
"zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334",
"zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0",
"zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14",
"zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb",
"zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648",
]
}
================================================
FILE: ticket_volume_analytics_zendesk_support/infra/airbyte/main.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Sources
// Zendesk Support source: authenticates with an API token + email
// (supplied via variables, expected in the git-ignored terraform.tfvars).
resource "airbyte_source_zendesk_support" "my_source_zendesksupport" {
  configuration = {
    source_type = "zendesk-support"
    credentials = {
      source_zendesk_support_authentication_api_token = {
        credentials = "api_token"
        api_token   = var.api_token
        email       = var.email
      }
    }
    // Only records created on/after this date are synced.
    start_date = "2020-10-15T00:00:00Z"
    // NOTE(review): subdomain is hard-coded to a sample Zendesk account;
    // it should likely be promoted to a variable — confirm before reuse.
    subdomain = "self3836"
  }
  name         = "Zendesk Support"
  workspace_id = var.workspace_id
}
// Destinations
// BigQuery destination: loads synced data into the configured dataset
// using standard inserts (no GCS staging bucket required).
resource "airbyte_destination_bigquery" "bigquery" {
  configuration = {
    dataset_id       = var.dataset_id
    dataset_location = "US"
    destination_type = "bigquery"
    project_id       = var.project_id
    // Service-account key JSON, passed as a (sensitive) variable.
    credentials_json = var.credentials_json
    loading_method = {
      destination_bigquery_loading_method_standard_inserts = {
        method = "Standard"
      }
    }
  }
  name         = "BigQuery"
  workspace_id = var.workspace_id
}
// Connections
// Connection wiring the Zendesk Support source to the BigQuery destination.
// Only the streams listed below are synced.
resource "airbyte_connection" "zendesk_support_bigquery" {
  name           = "Zendesk Support to BigQuery"
  source_id      = airbyte_source_zendesk_support.my_source_zendesksupport.source_id
  destination_id = airbyte_destination_bigquery.bigquery.destination_id
  configurations = {
    streams = [
      {
        name = "users"
      },
      {
        name = "tickets"
      },
      {
        name = "satisfaction_ratings"
      },
      {
        name = "ticket_metrics"
      },
      {
        name = "ticket_metric_events"
      },
      {
        name = "ticket_comments"
      },
      {
        name = "ticket_audits"
      }
    ]
  }
}
================================================
FILE: ticket_volume_analytics_zendesk_support/infra/airbyte/provider.tf
================================================
// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs
// Pin the Airbyte Terraform provider to a known-good version.
terraform {
  required_providers {
    airbyte = {
      source  = "airbytehq/airbyte"
      version = "0.3.3"
    }
  }
}

provider "airbyte" {
  // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy,
  // include the actual password/username you've set up (or use the defaults below)
  username = "airbyte"
  password = "password"

  // if running locally (Airbyte OSS), include the server url to the airbyte-api-server
  server_url = "http://localhost:8006/v1/"
}
================================================
FILE: ticket_volume_analytics_zendesk_support/infra/airbyte/variables.tf
================================================
// Input variables for the Airbyte Terraform configuration.
// Values are expected in terraform.tfvars (git-ignored; see infra/.gitignore).

// Zendesk API token for the Zendesk Support source.
variable "api_token" {
  type = string
}

// Zendesk account email paired with the API token.
variable "email" {
  type = string
}

// Airbyte workspace to create the resources in.
variable "workspace_id" {
  type = string
}

// Target BigQuery dataset for synced data.
variable "dataset_id" {
  type = string
}

// GCP project that hosts the BigQuery dataset.
variable "project_id" {
  type = string
}

// Service-account key JSON used by the BigQuery destination (sensitive).
variable "credentials_json" {
  type = string
}
================================================
FILE: ticket_volume_analytics_zendesk_support/orchestration/orchestration/__init__.py
================================================
================================================
FILE: ticket_volume_analytics_zendesk_support/orchestration/orchestration/assets.py
================================================
import os

from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance

from .constants import dbt_manifest_path


@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize all dbt models by streaming the events of a `dbt build`."""
    yield from dbt.cli(["build"], context=context).stream()


# Connection details for the local Airbyte instance. The password is read
# from the AIRBYTE_PASSWORD environment variable (as instructed in the
# quickstart README) instead of being hard-coded; all values fall back to
# the Airbyte OSS defaults for backward compatibility.
airbyte_instance = AirbyteResource(
    host=os.getenv("AIRBYTE_HOST", "localhost"),
    port=os.getenv("AIRBYTE_PORT", "8000"),
    # If using basic auth, include username and password:
    username=os.getenv("AIRBYTE_USERNAME", "airbyte"),
    password=os.getenv("AIRBYTE_PASSWORD", "password"),
)

# Expose every synced Airbyte connection in the instance as Dagster assets.
airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance)
================================================
FILE: ticket_volume_analytics_zendesk_support/orchestration/orchestration/constants.py
================================================
import os
from pathlib import Path

from dagster_dbt import DbtCliResource

# Absolute path to the dbt project that sits beside this orchestration package.
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
# dbt CLI wrapper used below (and re-created in definitions.py as a resource).
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime.
# Otherwise, we expect a manifest to be present in the project's target directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    # `dbt parse` writes a fresh manifest.json into the invocation's target path.
    dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait()
    dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json")
else:
    dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json")
================================================
FILE: ticket_volume_analytics_zendesk_support/orchestration/orchestration/definitions.py
================================================
import os

from dagster import Definitions
from dagster_dbt import DbtCliResource

from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# Dagster code-location definitions: the dbt and Airbyte assets, any
# configured schedules, and the dbt CLI resource used to run the project.
defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={
        "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)),
    },
)
================================================
FILE: ticket_volume_analytics_zendesk_support/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
schedules = [
# build_schedule_from_dbt_selection(
# [dbt_project_dbt_assets],
# job_name="materialize_dbt_models",
# cron_schedule="0 0 * * *",
# dbt_select="fqn:*",
# ),
]
================================================
FILE: ticket_volume_analytics_zendesk_support/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: ticket_volume_analytics_zendesk_support/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Packaging for the Dagster orchestration code location.
setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        # Required by orchestration/assets.py (AirbyteResource,
        # load_assets_from_airbyte_instance); previously missing.
        "dagster-airbyte",
        "dbt-core>=1.4.0",
        "dbt-bigquery",
    ],
    extras_require={
        "dev": [
            # Local Dagster UI for `dagster dev`.
            "dagster-webserver",
        ]
    },
)
================================================
FILE: ticket_volume_analytics_zendesk_support/setup.py
================================================
from setuptools import find_packages, setup

# Convenience package that installs every Python dependency needed to run
# this quickstart (dbt + Dagster + the Dagster/Airbyte integration).
setup(
    name="airbyte-dbt-dagster",
    packages=find_packages(),
    install_requires=[
        "dbt-bigquery",
        "dagster",
        "dagster-cloud",
        "dagster-dbt",
        "dagster-airbyte",
    ],
    # `pip install -e ".[dev]"` additionally installs the Dagster UI and pytest.
    extras_require={"dev": ["dagit", "pytest"]},
)
================================================
FILE: vector_store_integration/AI_assistant_streamlit_app/.gitignore
================================================
venv/
.secrets
.streamlit
dist/
build/
*.egg-info/
.DS_STORE
================================================
FILE: vector_store_integration/AI_assistant_streamlit_app/README.md
================================================
### Run Locally
- The project is set to work with our database credentials and OpenAI API key. You will need to update the credentials to work with your own data.
- Create a folder called `.streamlit` and create a file called `secrets.toml`.
- Add the following to the `secrets.toml` file:
```toml
OPENAI_API_KEY="YOUR_OPENAI_API_KEY"
SNOWFLAKE_HOST="YOUR_SNOWFLAKE_ACCOUNT_NAME"
SNOWFLAKE_ROLE="YOUR_SNOWFLAKE_ROLE"
SNOWFLAKE_WAREHOUSE="YOUR_SNOWFLAKE_WAREHOUSE"
SNOWFLAKE_DATABASE="YOUR_SNOWFLAKE_DATABASE"
SNOWFLAKE_SCHEMA="YOUR_SNOWFLAKE_SCHEMA"
SNOWFLAKE_USERNAME="YOUR_SNOWFLAKE_USERNAME"
SNOWFLAKE_PASSWORD="YOUR_SNOWFLAKE_PASSWORD"
```
- Update the table names and LLM instructions in `app.py` with data specific to your use case.
- Create a virtual environment:
```bash
python3 -m venv venv
```
- Activate the virtual environment:
```bash
source venv/bin/activate
```
- Install the requirements:
```bash
pip install -r requirements.txt
```
- Run the app:
```bash
streamlit run app.py
```
- Open your browser and go to [http://localhost:8501](http://localhost:8501).
### Deploy to Streamlit Community Cloud
- Copy the code into its own repository.
- Create an account [here](https://streamlit.io/cloud) and follow the instructions to give access to the repository you created.
================================================
FILE: vector_store_integration/AI_assistant_streamlit_app/app.py
================================================
import streamlit as st
import random
import time
from openai import OpenAI
import openai
from snowflake import connector
from langchain.embeddings import OpenAIEmbeddings
from typing import List
def get_db_connection():
    """Open a new Snowflake connection using credentials from Streamlit secrets.

    Callers are responsible for closing the returned connection.
    """
    return connector.connect(
        account=st.secrets["SNOWFLAKE_HOST"],
        role=st.secrets["SNOWFLAKE_ROLE"],
        warehouse=st.secrets["SNOWFLAKE_WAREHOUSE"],
        database=st.secrets["SNOWFLAKE_DATABASE"],
        schema=st.secrets["SNOWFLAKE_SCHEMA"],
        user=st.secrets["SNOWFLAKE_USERNAME"],
        password=st.secrets["SNOWFLAKE_PASSWORD"],
    )
def get_similar_chunks(query_vector, table_names) -> List[str]:
    """Return the two most similar document chunks from each of *table_names*.

    Ranks rows by Snowflake VECTOR_COSINE_SIMILARITY against the stored
    embeddings. The vector and table names are interpolated into the SQL
    text, so they must come from trusted code (here: the embeddings client
    and the hard-coded routing table), never from raw user input.
    """
    conn = get_db_connection()
    cursor = conn.cursor()
    chunks = []
    # try/finally ensures the cursor and connection are released even if a
    # query fails part-way through the loop (previously they leaked).
    try:
        for table_name in table_names:
            query = f"""
            SELECT document_content, VECTOR_COSINE_SIMILARITY(embedding, CAST({query_vector} AS VECTOR(FLOAT, 1536))) AS similarity
            FROM {table_name}
            ORDER BY similarity DESC
            LIMIT 2
            """
            cursor.execute(query)
            result = cursor.fetchall()
            chunks += [item[0] for item in result]
    finally:
        cursor.close()
        conn.close()
    return chunks
def get_completion(question, document_chunks: List[str], model_name: str = "llama2-70b-chat"):
    """Ask Snowflake Cortex to answer *question* grounded in *document_chunks*.

    The chunks and question are spliced into the prompt via $$-quoted
    strings; content containing '$$' would break the query — TODO confirm
    upstream chunking never emits it.
    """
    conn = get_db_connection()
    cur = conn.cursor()
    chunks = "\n\n".join(document_chunks)
    query = f"""
    SELECT snowflake.cortex.complete(
        '{model_name}',
        CONCAT(
            'You are an Airbyte product assistant. Answer the question based on the context. Be concise. When returning a list of items, Please enumerate description on separate lines','Context: ',
            $$
            {chunks}
            $$,
            'Question: ',
            $$ {question} $$,
            'Answer: '
        )
    ) as response;"""
    # try/finally ensures Snowflake resources are released even if the
    # Cortex call raises (previously the connection leaked on error).
    try:
        cur.execute(query)
        result = cur.fetchall()
    finally:
        cur.close()
        conn.close()
    # TO-DO: better parsing here
    return result[0][0].strip()
def get_user_intent(query):
    """Classify *query* into an intent label via few-shot prompting.

    Returns the raw model reply — expected to be one of "docs_question",
    "github_question", or "zendesk_question" (see get_tables_to_query).
    """
    # This method does a simple few-shot classification to infer the user's intent.
    examples = [
        {"role": "system", "content": "You are an assistant that classifies user intents based on their messages."},
        {"role": "user", "content": "How can I add vector data in Snowflake?"},
        {"role": "assistant", "content": "docs_question"},
        {"role": "user", "content": "How do I set up Snowflake Cortex destination?"},
        {"role": "assistant", "content": "docs_question"},
        {"role": "user", "content": "What are the upcoming features for Snowflake Cortex?"},
        {"role": "assistant", "content": "github_question"},
        {"role": "user", "content": "What are the known issues for Snowflake Cortex?"},
        {"role": "assistant", "content": "github_question"},
        {"role": "user", "content": "Which customers have requested features for Snowflake Cortex?"},
        {"role": "assistant", "content": "zendesk_question"},
        {"role": "user", "content": "Which customers have requested authorization related features for Snowflake Cortex?"},
        {"role": "assistant", "content": "zendesk_question"},
    ]
    examples.append({"role": "user", "content": query})

    openai.api_key = st.secrets["OPENAI_API_KEY"]

    # Call the OpenAI chat API to get the intent classification
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=examples,
        max_tokens=10,
        n=1,
        stop=["\n"]
    )

    intent = response.choices[0].message.content
    return intent
def get_tables_to_query(query):
    """Route *query* to the Snowflake tables and LLM that should answer it.

    Returns a (table_names, model_name) tuple based on the classified intent;
    anything unrecognized falls back to the docs table with the Arctic model.
    """
    routing = {
        "github_question": (["airbyte_github_issues"], "llama2-70b-chat"),
        "zendesk_question": (["airbyte_zendesk_tickets", "airbyte_zendesk_users"], "llama2-70b-chat"),
    }
    fallback = (["airbyte_docs"], "snowflake-arctic")
    return routing.get(get_user_intent(query), fallback)
def get_response(query):
    """Answer *query* via RAG: embed it, retrieve similar chunks, then complete."""
    # embed the query
    embeddings = OpenAIEmbeddings(openai_api_key=st.secrets["OPENAI_API_KEY"])
    # get similar chunks from sources/tables in Snowflake
    [tables, model] = get_tables_to_query(query)
    chunks = get_similar_chunks(embeddings.embed_query(query), tables)
    if (len(chunks) == 0):
        return "I am sorry, I do not have the context to answer your question."
    else:
        return get_completion(query, chunks, model)
def response_generator(query):
    """Yield the assistant's answer piece by piece for a typing effect.

    The response is tokenized into words, single spaces, and newlines so
    that line breaks survive streaming; a short sleep between tokens gives
    the UI its incremental feel.
    """
    response = get_response(query)

    # Split the response by spaces but preserve the new lines.
    tokens = []
    word = ''
    for ch in response:
        if ch not in ('\n', ' '):
            word += ch
            continue
        if word:
            tokens.append(word)
            word = ''
        tokens.append('\n' if ch == '\n' else ' ')
    if word:
        tokens.append(word)

    for token in tokens:
        yield token
        time.sleep(0.01)
st.title("AI Product assistant")

# simulate initial message from assistant
initial_message = """👋 Hello! I'm here to help you with any questions you have on Airbyte's products or services.
How can I assist you today?"""

# Display initial message from assistant
with st.chat_message("assistant"):
    st.write(initial_message)

# Recommended questions section in the sidebar
st.sidebar.title("Recommended Questions")
recommended_questions = [
    "How can I add vector data in Snowflake?",
    "What are the upcoming features for Snowflake Cortex?",
    "Which customers have requested features for Snowflake Cortex?",
]
for question in recommended_questions:
    st.sidebar.markdown(f"- {question}")

client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])

# Set a default model
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-3.5-turbo"

# Initialize chat history (persists across Streamlit reruns)
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Walrus: only runs the turn when the user actually submitted a prompt.
if prompt := st.chat_input("Ask a question?"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant"):
        # Stream the RAG answer token by token into the chat container.
        response = st.write_stream(response_generator(prompt))
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
================================================
FILE: vector_store_integration/AI_assistant_streamlit_app/requirements.txt
================================================
streamlit
openai
snowflake-connector-python
langchain
langchain-community
tiktoken
================================================
FILE: vector_store_integration/RAG_using_PGVector.ipynb
================================================
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"metadata": {},
"cell_type": "code",
"outputs": [],
"execution_count": null,
"source": [
"## Airbyte PGVector RAG Demo\n",
"\n",
"This tutorial demonstrates how to use data stored in Airbyte's PGVector destination to perform Retrieval-Augmented Generation (RAG). You should use this destination when you intend to use PGVector for LLM specific vector operations like RAG.\n",
"\n",
"As a practical example, we'll build a Assistant—an AI chatbot capable of answering questions related to simpsons episoded using data from multiple Airbyte-related sources.\n",
"\n",
"#### Prerequisites:\n",
"* Vector data stored in Postgres with Vector colums via PGVector destination. In our case we are using data from kaggle.\n",
"* Postgresql DB with PGVector enabled\n",
"* Open AI key\n"
]
},
{
"cell_type": "markdown",
"source": [
"### a. Install dependencies and import secrets\n",
"\n"
],
"metadata": {
"id": "7R0-uD7R3Uki"
}
},
{
"cell_type": "code",
"source": [
"!pip install sqlalchemy openai rich psycopg2 python-dotenv langchain-openai"
],
"metadata": {
"collapsed": true,
"id": "HbR-po_Z3VFV"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import openai\n",
"import json\n",
"import rich\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from sqlalchemy import create_engine\n",
"from sqlalchemy.engine import URL\n",
"from google.colab import userdata\n",
"\n",
"\n",
"OPENAI_API_KEY = userdata.get('openai_api_key')\n",
"HOST = userdata.get(\"db_host\")\n",
"USERNAME = userdata.get(\"db_username\")\n",
"PASSWORD = userdata.get(\"db_password\")\n",
"DATABASE = userdata.get(\"db_name\")"
],
"metadata": {
"id": "LP0QfMFQ6Flz"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"### b. Initialize open AI client and DB Engine\n"
],
"metadata": {
"id": "mmHB_MId7zwo"
}
},
{
"cell_type": "code",
"source": [
"openai.api_key = OPENAI_API_KEY\n",
"\n",
"url = URL.create(\n",
" \"postgresql\",\n",
" host=HOST,\n",
" username=USERNAME,\n",
" password=PASSWORD,\n",
" database=DATABASE,\n",
")\n",
"\n",
"engine = create_engine(url)"
],
"metadata": {
"id": "7Y3iCe6e7-Ra"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"### c. Explore data stored in PostgreSQL\n",
"\n",
"We need a few methods to embed user questions and make searches in DB\n",
"\n",
"- Helper to embed the user question so we can then search for it in the DB.\n",
"- Function to get the context from the database using a user question as input.\n",
"- One to get the response from the chat assistant that will use the context using the method from previous step."
],
"metadata": {
"id": "ZQ0PzOfb9rtK"
}
},
{
"cell_type": "code",
"source": [
"from sqlalchemy import text\n",
"from openai import OpenAI\n",
"\n",
"client = OpenAI(\n",
" api_key=OPENAI_API_KEY,\n",
")\n",
"\n",
"def get_embedding_from_open_ai(question):\n",
" print(f\"Embedding user's query: {question}\")\n",
" embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)\n",
" embedding_response = embeddings.embed_query(question)\n",
" return embedding_response\n",
"\n",
"QUERY_TEMPLATE = \"\"\"\n",
"SELECT document_content,\n",
" metadata->'title' as episode_title,\n",
" metadata->'script_line_number' as script_line_number,\n",
" metadata->'name' as character_name,\n",
" metadata->'spoken_words' as spoken_words\n",
"FROM episode_spoken_words\n",
"ORDER BY embedding <-> :question_vector\n",
"LIMIT 5\n",
"\"\"\"\n",
"\n",
"def get_context(question) -> str:\n",
" # Get the embedding from OpenAI\n",
" question_vector = get_embedding_from_open_ai(question)\n",
"\n",
" # Format the question vector as a string in the format expected by PostgreSQL\n",
" question_vector_str = '[' + ','.join(map(str, question_vector)) + ']'\n",
"\n",
" # Use the text() function for raw queries with SQLAlchemy\n",
" query = text(QUERY_TEMPLATE)\n",
"\n",
" # Execute the query, passing the vector as a bind parameter\n",
" with engine.connect() as connection:\n",
" result = connection.execute(query, {'question_vector': question_vector_str})\n",
"\n",
" # Format and return the result\n",
" return (\"\\n\\n\" + \"-\" * 8 + \"\\n\\n\").join(\n",
" [\n",
" f\"Episode {row.episode_title} | Line number: {row.script_line_number} | \"\n",
" f\"Spoken Words: {row.spoken_words} | Character: {row.character_name}\"\n",
" for row in result\n",
" ]\n",
" )\n",
"\n",
"\n",
"def get_response(question):\n",
" response = client.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": \"You are a Simpsons expert talking about Simpsons episodes.\"},\n",
" {\"role\": \"user\", \"content\": question},\n",
" {\"role\": \"assistant\", \"content\": f\"Use only this information to answer the question: {get_context(question)}. Do not search on the internet.\"}\n",
" ]\n",
" )\n",
" return response.choices[0].message.content\n",
"\n"
],
"metadata": {
"id": "_PJ6eb5-A419"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"### d. Make questions\n",
"\n",
"Finally, let's put all together and get a response from our assistant using the Simpsons database."
],
"metadata": {
"id": "spQPCVe9AZKh"
}
},
{
"cell_type": "code",
"source": [
"question = \"Talking about food\"\n",
"response = get_response(question)\n",
"rich.print(response)"
],
"metadata": {
"id": "pwegM_02AgOU"
},
"execution_count": null,
"outputs": []
}
]
}
================================================
FILE: vector_store_integration/RAG_using_Snowflake_Cortex.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "KWKEgtakJtH6"
},
"source": [
"## Airbyte Snowflake Cortex RAG Demo\n",
"\n",
"This tutorial demonstrates how to use data stored in Airbyte's Snowflake Cortex destination to perform Retrieval-Augmented Generation (RAG). You should use this destination when you intend to use Snowflake for LLM specific vector operations like RAG.\n",
"\n",
"As a practical example, we'll build a Product Assistant—an AI chatbot capable of answering product-related questions using data from multiple Airbyte-related sources. With the Product Assistant, you can ask questions across all your sales enablement data in one place.\n",
"\n",
"#### Prerequisites:\n",
"* Vector data stored in Snowflake via Snowflake Cortex destination. In our case we are using data from airbyte docs, Github issues and Zendesk.\n",
"* Snowflake account with Cortex functions enabled\n",
"* OpenAI key\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "gS3oGgI0CVpn"
},
"source": [
"### a. Install dependencies\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "bM4Te8XEWECV"
},
"outputs": [],
"source": [
"# Add virtual environment support in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# Install openai\n",
"# tbd - add snowflake python connector\n",
"%pip install --quiet openai snowflake-connector-python langchain-openai tiktoken"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "nKbdq1eUcmAz"
},
"source": [
"### b. Explore data stored in Snowflake.\n",
"\n",
"Let's see what document/vector data in Snowflake looks like."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"collapsed": true,
"id": "TKDGBUhLcfuK",
"outputId": "1231d559-298f-43a0-95c3-a67f8cfb62ab"
},
"outputs": [],
"source": [
"# Fetch data from airbyte_docs table\n",
"from snowflake import connector\n",
"from google.colab import userdata\n",
"from typing import List\n",
"import pandas as pd\n",
"\n",
"def get_db_connection():\n",
" return connector.connect(\n",
" account=userdata.get(\"SNOWFLAKE_HOST\"),\n",
" role=userdata.get(\"SNOWFLAKE_ROLE\"),\n",
" warehouse=userdata.get(\"SNOWFLAKE_WAREHOUSE\"),\n",
" database=userdata.get(\"SNOWFLAKE_DATABASE\"),\n",
" schema=userdata.get(\"SNOWFLAKE_SCHEMA\"),\n",
" user=userdata.get(\"SNOWFLAKE_USERNAME\"),\n",
" password=userdata.get(\"SNOWFLAKE_PASSWORD\"),\n",
" )\n",
"\n",
"def fetch_table_data(table_name, columns):\n",
" conn = get_db_connection()\n",
" cursor = conn.cursor()\n",
"\n",
" # Form the query to select specific columns\n",
" columns_str = \", \".join(columns)\n",
" query = f\"SELECT {columns_str} FROM {table_name};\"\n",
"\n",
" cursor.execute(query)\n",
" result = cursor.fetchall()\n",
"\n",
" # Fetch the column names\n",
" col_names = [desc[0] for desc in cursor.description]\n",
"\n",
" cursor.close()\n",
" conn.close()\n",
"\n",
" # Load the result into a pandas DataFrame\n",
" df = pd.DataFrame(result, columns=col_names)\n",
" return df;\n",
"\n",
"# show data from airbyte_docs table\n",
"data_frame = fetch_table_data(\"airbyte_docs\", [\"document_id\", \"document_content\", \"metadata\", \"embedding\"])\n",
"data_frame"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "5LU0M4g6clBj"
},
"source": []
},
{
"cell_type": "markdown",
"metadata": {
"id": "SyyzczWXBXTS"
},
"source": [
"### c. Build the RAG pipeline and ask a question\n",
"\n",
"Let's write the three main pieces of a RAG pipeline:\n",
"* Embedding incoming query\n",
"* Doing similarity search to find matching chunks\n",
"* Send chunks to LLM for completion"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 265
},
"id": "zQ6rWEV2u-3U",
"outputId": "d364b7e3-cfca-4628-da20-3ae4f34ad143"
},
"outputs": [],
"source": [
"from openai import OpenAI\n",
"from snowflake import connector\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from google.colab import userdata\n",
"from typing import List\n",
"from rich.console import Console\n",
"\n",
"def get_db_connection():\n",
" return connector.connect(\n",
" account=userdata.get(\"SNOWFLAKE_HOST\"),\n",
" role=userdata.get(\"SNOWFLAKE_ROLE\"),\n",
" warehouse=userdata.get(\"SNOWFLAKE_WAREHOUSE\"),\n",
" database=userdata.get(\"SNOWFLAKE_DATABASE\"),\n",
" schema=userdata.get(\"SNOWFLAKE_SCHEMA\"),\n",
" user=userdata.get(\"SNOWFLAKE_USERNAME\"),\n",
" password=userdata.get(\"SNOWFLAKE_PASSWORD\"),\n",
" )\n",
"\n",
"# convert user's query into a vector array to prep for similarity search\n",
"def get_embedding_from_openai(query)->str:\n",
" print(f\"Embedding user's query -> {query}...\")\n",
" embeddings = OpenAIEmbeddings(openai_api_key=userdata.get(\"OPENAI_API_KEY\"))\n",
" return embeddings\n",
"\n",
"# use Snowflake's Cortex in-built similarity search to find matching chunks\n",
"def get_similar_chunks_from_snowflake(query_vector, table_names) -> List[str]:\n",
" print(\"\\nRetrieving similar chunks...\")\n",
" conn = get_db_connection()\n",
" cursor = conn.cursor()\n",
"\n",
" chunks = []\n",
" for table_name in table_names:\n",
" query = f\"\"\"\n",
" SELECT document_content,\n",
" VECTOR_COSINE_SIMILARITY(embedding, CAST({query_vector} AS VECTOR(FLOAT, 1536))) AS similarity\n",
" FROM {table_name}\n",
" ORDER BY similarity DESC\n",
" LIMIT 2\n",
" \"\"\"\n",
" cursor.execute(query)\n",
" result = cursor.fetchall()\n",
" print(f\"Found {len(result)} matching chunks in table:{table_name}!\")\n",
" chunks += [item[0] for item in result]\n",
" cursor.close()\n",
" conn.close()\n",
"\n",
" return chunks\n",
"\n",
"# use Snowflake's Cortex in-built completion to answer the question from the matching chunks.\n",
"def get_completion_from_snowflake(question, document_chunks: List[str], model_name):\n",
" print(f\"\\nSending chunks to Snowflake (LLM: {model_name}) for completion...\")\n",
" conn = get_db_connection()\n",
" cur = conn.cursor()\n",
"\n",
" chunks = \"\\n\\n\".join(document_chunks)\n",
"\n",
" query = f\"\"\"\n",
" SELECT snowflake.cortex.complete(\n",
" '{model_name}',\n",
" CONCAT(\n",
" 'You are an Airbyte product assistant. Answer the question based on the context. Do not use any other information. Be concise. When returning a list of items, please enumerate description on separate lines','Context: ',\n",
" $$\n",
" {chunks}\n",
" $$,\n",
" 'Question: ',\n",
" $$ {question} $$,\n",
" 'Answer: '\n",
" )\n",
" ) as response;\"\"\"\n",
" cur.execute(query)\n",
" result = cur.fetchall()\n",
" cur.close()\n",
" conn.close()\n",
" # TO-DO: better parsing here\n",
" return result[0][0].strip()\n",
"\n",
"# Putting it all together\n",
"def get_response(query, table_names, model_name=\"llama2-70b-chat\"):\n",
" # Step 1: embed the query\n",
" embeddings = get_embedding_from_openai(query)\n",
"\n",
" # Step 2: get similar chunks from sources/tables in Snowflake\n",
" chunks = get_similar_chunks_from_snowflake(embeddings.embed_query(query), table_names)\n",
"\n",
" if (len(chunks) == 0):\n",
" return \"I am sorry, I do not have the context to answer your question.\"\n",
" else:\n",
" # Step 3: send chunks to LLM for completion\n",
" return get_completion_from_snowflake(query, chunks, model_name)\n",
"\n",
"# Ask a question\n",
"query = 'How can I store vector data in Snowflake'\n",
"response = get_response(query, [\"airbyte_docs\"], \"snowflake-arctic\")\n",
"\n",
"Console().print(f\"\\n\\nResponse from LLM:\\n\\n[blue]{response}[/blue]\")\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "G5NvV86T57-V"
},
"source": [
"### d. Let's ask another question\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 281
},
"id": "cPYdEs663tl8",
"outputId": "6997984b-6ea3-4b95-d80f-ccace53b6d43"
},
"outputs": [],
"source": [
"query = 'What are the upcoming features for Snowflake Cortex?'\n",
"response = get_response(query, [\"airbyte_github_issues\"])\n",
"Console().print(f\"\\n\\nResponse from LLM:\\n\\n[blue]{response}[/blue]\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "S_CJ3_C9pvw4"
},
"source": [
"### e. Closing the loop\n",
"Let's see if there are customers asking for upcoming features above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 267
},
"id": "mLfNlzGLqOIg",
"outputId": "dcfa593f-dab0-44ff-fcec-e4cb671b6d1a"
},
"outputs": [],
"source": [
"query = 'Are there customers asking for better authorization options for Snowflake Cortex? Give me their names and email.'\n",
"response = get_response(query, [\"airbyte_zendesk_tickets\", \"airbyte_zendesk_users\"])\n",
"Console().print(f\"\\n\\nResponse from LLM:\\n\\n[blue]{response}[/blue]\")"
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [
"nKbdq1eUcmAz"
],
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: vector_store_integration/RAG_using_Vectara.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "ayf6NOyRb-zn"
},
"source": [
"# **Airbyte Vectara RAG Tutorial**\n",
"This tutorial showcases how to use data stored in Airbyte's Vectara destination to perform Retrieval-Augmented Generation (RAG).\n",
"## **Prerequisites**\n",
"**1) OpenAI API Key**:\n",
" - **Create an OpenAI Account**: Sign up for an account on [OpenAI](https://www.openai.com/).\n",
" - **Generate an API Key**: Go to the API section and generate a new API key. For detailed instructions, refer to the [OpenAI documentation](https://beta.openai.com/docs/quickstart).\n",
"\n",
"**2) Vectara Customer ID, Corpus ID, API Key**:\n",
" - **Create a Vectara Account**: Sign up for an account on [Vectara](https://vectara.com/).\n",
" - **Customer ID**: Click on the profile icon on top right, and look for your customer ID [Vectara Console](https://console.vectara.com/).\n",
" - **Corpus ID**: You can see the list of Corpora you've created in your Vectara Account. Note down the required Corpus ID [Vectara Corpora](https://console.vectara.com/console/corpora).\n",
" - **Generate an API Key**: Go here and generate a new API key. [Vectara API_Key](https://console.vectara.com/console/apiAccess/personalApiKey)."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "41LFzVLEmXT3"
},
"source": [
"# **Install Dependencies**\n",
"As in any other Python Code, the first step is to install the required Dependencies!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "NSOZnJ6G9zQu"
},
"outputs": [],
"source": [
"# Add virtual environment support in Google Colab\n",
"!apt-get install -qq python3.10-venv\n",
"\n",
"# Install required packages\n",
"%pip install --quiet openai langchain-openai tiktoken pandas langchain_community\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "dQTCEicCmx2A"
},
"source": [
"# **Set Up Environment Variables**\n",
"We configure the required credentials here!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "94bGodjCFfsN"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"VECTARA_CUSTOMER_ID\"] = \"YOUR_VECTARA_CUSTOMER_ID\"\n",
"os.environ[\"VECTARA_CORPUS_ID\"] = \"YOUR_VECTARA_CORPUS_ID\"\n",
"os.environ[\"VECTARA_API_KEY\"] = \"YOUR_VECTARA_API_KEY\"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"YOUR_OPENAI_API_KEY\""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "QXR8wOMWnztR"
},
"source": [
"# **Initialize Vectara Vector Store**\n",
"We start by initializing the Vectara Vector Store and then see what the data in Vectara looks like.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ipg7aPJq-wZ8"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"from langchain_community.vectorstores import Vectara\n",
"from google.colab import userdata\n",
"\n",
"# Initialize Vectara vector store\n",
"vectara = Vectara(\n",
" vectara_customer_id=os.getenv(\"VECTARA_CUSTOMER_ID\"),\n",
" vectara_corpus_id=os.getenv(\"VECTARA_CORPUS_ID\"),\n",
" vectara_api_key=os.getenv(\"VECTARA_API_KEY\")\n",
")\n",
"\n",
"def fetch_vectara_data():\n",
" # Simulate fetching data from Vectara\n",
" data = {\n",
" \"document_id\": [1, 2, 3],\n",
" \"document_content\": [\"Content of doc 1\", \"Content of doc 2\", \"Content of doc 3\"],\n",
" \"metadata\": [\"Metadata1\", \"Metadata2\", \"Metadata3\"],\n",
" \"embedding\": [\"[0.1, 0.2, ...]\", \"[0.3, 0.4, ...]\", \"[0.5, 0.6, ...]\"]\n",
" }\n",
" df = pd.DataFrame(data)\n",
" return df\n",
"\n",
"# show data\n",
"data_frame = fetch_vectara_data()\n",
"print(data_frame)\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6b0VCg4Oox4l"
},
"source": [
"# **Embedding and similarity search with Vectara**\n",
"Here we will convert the user's query into embeddings using OpenAI and retrieve similar chunks from Vectara based on the query"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "G6UuEXP9FpdF"
},
"outputs": [],
"source": [
"from openai import OpenAI\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from typing import List\n",
"from rich.console import Console\n",
"\n",
"\n",
"client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
"\n",
"# Convert user's query into a vector array to prep for similarity search\n",
"def get_embedding_from_openai(query) -> List[float]:\n",
" print(f\"Embedding user's query -> {query}...\")\n",
" embeddings = OpenAIEmbeddings(openai_api_key=client.api_key)\n",
" return embeddings.embed_query(query)\n",
"\n",
"# Use Vectara to find matching chunks\n",
"def get_similar_chunks_from_vectara(query: str) -> List[str]:\n",
" print(\"\\nRetrieving similar chunks...\")\n",
" try:\n",
" results = vectara.similarity_search(query=query)\n",
" chunks = [result.page_content for result in results]\n",
" print(f\"Found {len(chunks)} matching chunks!\")\n",
" return chunks\n",
" except Exception as e:\n",
" print(f\"Error in retrieving chunks: {e}\")\n",
" return []\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ZEQN6yohqVSY"
},
"source": [
"# **Building RAG Pipeline and asking a question**\n",
"Finally we use OpenAI for querying our data! \n",
"We know the three main steps of a RAG Pipeline are : \n",
"- Embedding incoming query \n",
"- Doing similarity search to find matching chunks \n",
"- Send chunks to LLM for completion"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "zLKAuK-GqLPC"
},
"outputs": [],
"source": [
"# Use OpenAI to complete the response\n",
"def get_completion_from_openai(question, document_chunks: List[str], model_name=\"gpt-3.5-turbo\"):\n",
" print(f\"\\nSending chunks to OpenAI (LLM: {model_name}) for completion...\")\n",
" chunks = \"\\n\\n\".join(document_chunks)\n",
"\n",
" response = client.chat.completions.create(\n",
" model=model_name,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": \"You are an Airbyte product assistant. Answer the question based on the context. Do not use any other information. Be concise.\"},\n",
" {\"role\": \"user\", \"content\": f\"Context:\\n{chunks}\\n\\n{question}\\n\\nAnswer:\"}\n",
" ],\n",
" max_tokens=150\n",
" )\n",
" return response.choices[0].message.content.strip()\n",
"\n",
"# Putting it all together\n",
"def get_response(query, model_name=\"gpt-3.5-turbo\"):\n",
"\n",
" chunks = get_similar_chunks_from_vectara(query)\n",
"\n",
" if len(chunks) == 0:\n",
" return \"I am sorry, I do not have the context to answer your question.\"\n",
" else:\n",
"\n",
" return get_completion_from_openai(query, chunks, model_name)\n",
"\n",
"# Ask a question\n",
"query = 'What data do you have?'\n",
"response = get_response(query)\n",
"\n",
"Console().print(f\"\\n\\nResponse from LLM:\\n\\n[blue]{response}[/blue]\")\n"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: weather_data_stack/.gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#Desktop Services Store
.DS_Store
================================================
FILE: weather_data_stack/README.md
================================================
# Weather Data Stack with Airbyte, dbt, Dagster and BigQuery
Welcome to the "Weather Data Stack" repository! 🌟 This is your go-to place to easily set up a data stack using Airbyte, dbt, BigQuery, and Dagster. With this setup, you can pull weather data from WeatherStack API, put it into BigQuery, and play around with it using dbt and Dagster.
This Quickstart is all about making things easy, getting you started quickly and showing you how smoothly all these tools can work together!
## Table of Contents
- [Weather Data Stack with Airbyte, dbt, Dagster and BigQuery](#weather-data-stack-with-airbyte-dbt-dagster-and-bigquery)
- [Table of Contents](#table-of-contents)
- [Infrastructure Layout](#infrastracture-layout)
- [Prerequisites](#prerequisites)
- [1. Setting an environment for your project](#1-setting-an-environment-for-your-project)
- [2. Weatherstack API Key](#2-weatherstack-api-key)
- [1. Sign Up for a Weatherstack API Key](#1-sign-up-for-a-weatherstack-api-key)
- [3. Setting Up BigQuery](#3-setting-up-bigquery)
- [1. **Create a Google Cloud Project**](#1-create-a-google-cloud-project)
- [2. **Create BigQuery Datasets**](#2-create-bigquery-datasets)
- [3. **Create Service Accounts and Assign Roles**](#3-create-service-accounts-and-assign-roles)
- [4. **Generate JSON Keys for Service Accounts**](#4-generate-json-keys-for-service-accounts)
- [4. Setting Up Airbyte Connectors](#4-setting-up-airbyte-connectors)
- [1. Setting Up Airbyte Connectors with AirByteUI](#1-setting-up-airbyte-connectors-with-airbyteui)
- [5. Setting Up the dbt Project](#5-setting-up-the-dbt-project)
- [Next Steps](#next-steps)
- [1. **Explore the Data and Insights**](#1-explore-the-data-and-insights)
- [2. **Optimize Your dbt Models**](#2-optimize-your-dbt-models)
- [3. **Expand Your Data Sources**](#3-expand-your-data-sources)
- [4. **Enhance Data Quality and Testing**](#4-enhance-data-quality-and-testing)
- [5. **Automate and Monitor Your Pipelines**](#5-automate-and-monitor-your-pipelines)
- [6. **Scale Your Setup**](#6-scale-your-setup)
- [7. **Contribute to the Community**](#7-contribute-to-the-community)
## Infrastracture Layout

## Prerequisites
Before you embark on this integration, ensure you have the following set up and ready:
1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/).
2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS.
3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/).
4. **Google Cloud account with BigQuery**: You will also need to add the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#3-setting-up-bigquery).
5. **Weather Stack API**: You can grab your free weather API from [here](https://weatherstack.com/) after account opening. No Credit card is required for the starter version
## 1. Setting an environment for your project
Get the project up and running on your local machine by following these steps:
1. **Clone the repository (Clone only this quickstart)**:
```bash
git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git
```
```bash
cd quickstarts
```
```bash
git sparse-checkout add weather_data_stack
```
2. **Navigate to the directory**:
```bash
cd weather_data_stack
```
3. **Set Up a Virtual Environment**:
- For Mac:
```bash
python3 -m venv venv
source venv/bin/activate
```
- For Windows:
```bash
python -m venv venv
.\venv\Scripts\activate
```
4. **Install Dependencies**:
```bash
pip install -e ".[dev]"
```
## 2. Weatherstack API Key
To extract weather data from the Weatherstack API and store the API key in its own environment variable file, you can follow these steps:
#### 1. Sign Up for a Weatherstack API Key
1. Visit the [Weatherstack website](https://weatherstack.com/).
2. Sign up for an account or log in if you already have one.
3. Once logged in, go to the dashboard to obtain your API key.
## 3. Setting Up BigQuery
#### 1. **Create a Google Cloud Project**
- If you have a Google Cloud project, you can skip this step.
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
- Click on the "Select a project" dropdown at the top right and select "New Project".
- Give your project a name and follow the steps to create it.
#### 2. **Create BigQuery Datasets**
- In the Google Cloud Console, go to BigQuery.
- Make two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt.
- If you pick different names, remember to change the names in the code too.
**How to create a dataset:**
- In the left sidebar, click on your project name.
- Click “Create Dataset”.
- Enter the dataset ID (either `raw_data` or `transformed_data`).
- Click "Create Dataset".
#### 3. **Create Service Accounts and Assign Roles**
- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console.
- Click “Create Service Account”.
- Name your service account (like `airbyte-service-account`).
- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account.
- Follow the same steps to make another service account for dbt (like `dbt-service-account`) and assign the roles.
**How to create a service account and assign roles:**
- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown.
- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles.
- Finish the creation process.
#### 4. **Generate JSON Keys for Service Accounts**
- For both service accounts, make a JSON key to let the service accounts sign in.
**How to generate JSON key:**
- Find the service account in the “Service accounts” list.
- Click on the service account name.
- In the “Keys” section, click “Add Key” and pick JSON.
- The key will download automatically. Keep it safe and don’t share it.
- Do this for the other service account too.
## 4. Setting Up Airbyte Connectors
Here, you can set up connectors with source and destination manually using the Airbyte UI.
### 1. Setting Up Airbyte Connectors with AirByteUI
Use the generated public URL from the previous step to manually configure using the File option as source under public HTTPS.
- Here are specific [BigQuery](https://docs.airbyte.com/integrations/destinations/bigquery) instructions.
- Follow these [steps](https://docs.airbyte.com/quickstart/set-up-a-connection) for more.
## 5. Setting Up the dbt Project
[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up:
1. **Navigate to the dbt Project Directory**:
Change to the directory containing the dbt configuration:
```bash
cd dbt_project
```
2. **Update Connection Details**:
You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details.
3. **Utilize Environment Variables (Optional but Recommended)**:
To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file.
4. **Test the Connection**:
Once you’ve updated the connection details, you can test the connection to your BigQuery instance using:
```bash
dbt debug
```
If everything is set up correctly, this command should report a successful connection to BigQuery.
5. **Run the Models**:
If you would like to run the dbt models manually at this point, you can do so by executing:
```bash
dbt run
```
You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset.
## 6. Orchestrating with Dagster
[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows:
1. **Navigate to the Orchestration Directory**:
Switch to the directory containing the Dagster orchestration configurations:
```bash
cd orchestration
```
2. **Set Environment Variables**:
Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables:
```bash
export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1
export AIRBYTE_PASSWORD=password
```
Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here.
3. **Launch the Dagster UI**:
With the environment variables in place, kick-start the Dagster UI:
```bash
dagster dev
```
4. **Access Dagster in Your Browser**:
Open your browser and navigate to:
```
http://127.0.0.1:3000
```
Here, you should see assets for both Airbyte and dbt.
## Next Steps
Congratulations on deploying and running the Weather Data Stack quickstart! 🎉 Here are some suggestions on what you can explore next to dive deeper and get more out of your project:
### 1. **Explore the Data and Insights**
- Dive into the datasets in BigQuery, run some queries, and explore the data you've collected and transformed. This is your chance to uncover insights and understand the data better!
### 2. **Optimize Your dbt Models**
- Review the transformations you’ve applied using dbt. Try optimizing the models or create new ones based on your evolving needs and insights you want to extract.
### 3. **Expand Your Data Sources**
- Add more data sources to Airbyte. Explore different types of sources available, and see how they can enrich your existing datasets and broaden your analytical capabilities.
### 4. **Enhance Data Quality and Testing**
- Implement data quality tests in dbt to ensure the reliability and accuracy of your transformations. Use dbt's testing features to validate your data and catch issues early on.
### 5. **Automate and Monitor Your Pipelines**
- Explore more advanced Dagster configurations and setups to automate your pipelines further and set up monitoring and alerting to be informed of any issues immediately.
### 6. **Scale Your Setup**
- Consider scaling your setup to handle more data, more sources, and more transformations. Optimize your configurations and resources to ensure smooth and efficient processing of larger datasets.
### 7. **Contribute to the Community**
- Share your learnings, optimizations, and new configurations with the community. Contribute to the respective tool’s communities and help others learn and grow.
================================================
FILE: weather_data_stack/dbt_project/.gitignore
================================================
target/
dbt_packages/
logs/
.user.yml
================================================
FILE: weather_data_stack/dbt_project/README.md
================================================
Welcome to your new dbt project!
### Using the starter project
Try running the following commands:
- dbt run
- dbt test
### Resources:
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
- Find [dbt events](https://events.getdbt.com) near you
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
================================================
FILE: weather_data_stack/dbt_project/analyses/.gitkeep
================================================
================================================
FILE: weather_data_stack/dbt_project/dbt_project.yml
================================================
# Name your project! Project names should contain only lowercase characters
# and underscores. A good package name should reflect your organization's
# name or the intended use of these models
name: 'dbt_project'
version: '1.0.0'
config-version: 2
# This setting configures which "profile" dbt uses for this project.
profile: 'dbt_project'
# These configurations specify where dbt should look for different types of files.
# The `model-paths` config, for example, states that models in this project can be
# found in the "models/" directory. You probably won't need to change these!
model-paths: ["models"]
analysis-paths: ["analyses"]
test-paths: ["tests"]
seed-paths: ["seeds"]
macro-paths: ["macros"]
snapshot-paths: ["snapshots"]
clean-targets: # directories to be removed by `dbt clean`
- "target"
- "dbt_packages"
# Configuring models
# Full documentation: https://docs.getdbt.com/docs/configuring-models
# In this config, we tell dbt to build all models under the staging/ and
# marts/ directories as views. These settings can be overridden in the
# individual model files using the `{{ config(...) }}` macro.
models:
dbt_project:
# Config indicated by + and applies to all files under models/example/
staging:
+materialized: view
marts:
+materialized: view
================================================
FILE: weather_data_stack/dbt_project/macros/.gitkeep
================================================
================================================
FILE: weather_data_stack/dbt_project/models/marts/historial_weather_trends.sql
================================================
-- Daily temperature readings per location, limited to calendar year 2023.
-- NOTE(review): all fields are extracted from the raw `current` JSON column;
-- presumably the Airbyte weatherstack stream nests the full API payload
-- there — confirm against the landed data.
{% set start_date = '2023-01-01' %}
{% set end_date = '2023-12-31' %}

SELECT
    JSON_EXTRACT_SCALAR(`current`, '$.location.name') AS location_name,
    JSON_EXTRACT_SCALAR(`current`, '$.current.temperature') AS temperature,
    DATE(JSON_EXTRACT_SCALAR(`current`, '$.current.observation_time')) AS date
FROM
    -- Use ref() (not a hard-coded dataset.table) so dbt records the
    -- dependency on the staging model and resolves the target schema.
    {{ ref('stg_current_weather') }}
WHERE
    DATE(JSON_EXTRACT_SCALAR(`current`, '$.current.observation_time')) BETWEEN '{{ start_date }}' AND '{{ end_date }}'
ORDER BY
    date, location_name
================================================
FILE: weather_data_stack/dbt_project/models/sources/weatherstack_source.yml
================================================
version: 2
sources:
- name: weatherstack
# Use your BigQuery project ID
database: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
# Use your BigQuery dataset name
schema: weatherstack_airbyte
tables:
- name: current_weather
description: "Current weather observations synced from the Weatherstack API by the Airbyte connector."
columns:
- name: current
- name: location
- name: request
================================================
FILE: weather_data_stack/dbt_project/models/staging/stg_current_weather.sql
================================================
-- Staging passthrough of the raw current_weather stream that Airbyte
-- lands in the weatherstack dataset; downstream marts extract fields
-- from its JSON columns.
select *
from {{ source('weatherstack', 'current_weather') }}
================================================
FILE: weather_data_stack/dbt_project/profiles.yml
================================================
dbt_project:
  outputs:
    dev:
      type: bigquery
      method: service-account
      # Read the BigQuery project ID from the environment, consistent with
      # models/sources/weatherstack_source.yml (was a hard-coded placeholder).
      project: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}"
      dataset: transformed_data
      # Use an env variable to indicate your JSON key file path
      keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}"
      location: US
      priority: interactive
      job_execution_timeout_seconds: 300
      job_retries: 1
      threads: 1
  target: dev
================================================
FILE: weather_data_stack/dbt_project/seeds/.gitkeep
================================================
================================================
FILE: weather_data_stack/dbt_project/snapshots/.gitkeep
================================================
================================================
FILE: weather_data_stack/dbt_project/tests/.gitkeep
================================================
================================================
FILE: weather_data_stack/orchestration/orchestration/__init__.py
================================================
================================================
FILE: weather_data_stack/orchestration/orchestration/assets.py
================================================
import os
from dagster import OpExecutionContext
from dagster_dbt import DbtCliResource, dbt_assets
from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance
from .constants import dbt_manifest_path
@dbt_assets(manifest=dbt_manifest_path)
def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource):
    """Materialize every model in the dbt project by running `dbt build`,
    streaming per-model events back to Dagster as they complete."""
    build_invocation = dbt.cli(["build"], context=context)
    yield from build_invocation.stream()
# Connection details for the local Airbyte deployment.
airbyte_instance = AirbyteResource(
    host="localhost",
    port="8000",
    # Basic-auth credentials; AIRBYTE_PASSWORD defaults to "password" on
    # local installs (see the quickstart README).
    username="airbyte",
    password=os.getenv("AIRBYTE_PASSWORD"),
)

# Expose each synced Airbyte stream as a Dagster asset. The key prefix must
# match the dbt source name ("weatherstack" in
# models/sources/weatherstack_source.yml) so Dagster can link the Airbyte
# assets to the dbt staging models; "faker" here was a leftover from the
# faker quickstart and broke that lineage.
airbyte_assets = load_assets_from_airbyte_instance(
    airbyte_instance, key_prefix="weatherstack"
)
================================================
FILE: weather_data_stack/orchestration/orchestration/constants.py
================================================
"""Shared dbt configuration: the project directory, a reusable dbt CLI
resource, and the manifest path Dagster loads asset definitions from."""
import os
from pathlib import Path

from dagster_dbt import DbtCliResource

# .../weather_data_stack/dbt_project, resolved relative to this file.
dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve()
dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

# When DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, (re)generate the manifest at
# code-load time by running `dbt parse`; otherwise expect a previously built
# manifest under the project's target/ directory.
if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"):
    dbt_manifest_path = (
        dbt.cli(["parse"], manifest={}).wait().target_path.joinpath("manifest.json")
    )
else:
    dbt_manifest_path = dbt_project_dir / "target" / "manifest.json"
================================================
FILE: weather_data_stack/orchestration/orchestration/definitions.py
================================================
"""Wire the Airbyte and dbt assets, schedules, and resources into a single
Dagster code location."""
import os

from dagster import Definitions
from dagster_dbt import DbtCliResource

from .assets import dbt_project_dbt_assets, airbyte_assets
from .constants import dbt_project_dir
from .schedules import schedules

# The "dbt" resource key is the one dbt_project_dbt_assets is invoked with.
dbt_resource = DbtCliResource(project_dir=os.fspath(dbt_project_dir))

defs = Definitions(
    assets=[dbt_project_dbt_assets, airbyte_assets],
    schedules=schedules,
    resources={"dbt": dbt_resource},
)
================================================
FILE: weather_data_stack/orchestration/orchestration/schedules.py
================================================
"""
To add a daily schedule that materializes your dbt assets, uncomment the following lines.
"""
from dagster_dbt import build_schedule_from_dbt_selection
from .assets import dbt_project_dbt_assets
schedules = [
# build_schedule_from_dbt_selection(
# [dbt_project_dbt_assets],
# job_name="materialize_dbt_models",
# cron_schedule="0 0 * * *",
# dbt_select="fqn:*",
# ),
]
================================================
FILE: weather_data_stack/orchestration/pyproject.toml
================================================
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[tool.dagster]
module_name = "orchestration.definitions"
code_location_name = "orchestration"
================================================
FILE: weather_data_stack/orchestration/setup.py
================================================
from setuptools import find_packages, setup

# Runtime dependencies for the Dagster + dbt (BigQuery) orchestration package.
INSTALL_REQUIRES = [
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dbt-core>=1.4.0",
    "dbt-bigquery",
]

# `pip install -e ".[dev]"` additionally installs the local Dagster webserver.
EXTRAS_REQUIRE = {"dev": ["dagster-webserver"]}

setup(
    name="orchestration",
    version="0.0.1",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE,
)
================================================
FILE: weather_data_stack/setup.py
================================================
from setuptools import find_packages, setup

# Runtime dependencies for the Airbyte + dbt + Dagster quickstart.
REQUIREMENTS = [
    "dbt-bigquery",
    "dagster",
    "dagster-cloud",
    "dagster-dbt",
    "dagster-airbyte",
]

# `pip install -e ".[dev]"` additionally installs dagit and pytest.
DEV_EXTRAS = {"dev": ["dagit", "pytest"]}

setup(
    name="airbyte-dbt-dagster",
    packages=find_packages(),
    install_requires=REQUIREMENTS,
    extras_require=DEV_EXTRAS,
)