[
  {
    "path": ".circleci/config.yml",
    "content": "version: 2\n\njobs:\n\n  integration-postgres:\n    docker:\n      - image: cimg/python:3.11\n      - image: cimg/postgres:17.0\n        environment:\n          POSTGRES_USER: root\n    environment:\n      POSTGRES_HOST: localhost\n      POSTGRES_USER: root\n      DBT_ENV_SECRET_POSTGRES_PASS: ''\n      POSTGRES_PORT: 5432\n      POSTGRES_DATABASE: circle_test\n      POSTGRES_SCHEMA: codegen_integration_tests_postgres\n\n    steps:\n      - checkout\n      - run: pip install dbt-core dbt-postgres\n      - run:\n          name: \"Run Tests - Postgres\"\n          command: |\n            cd integration_tests\n            dbt --warn-error deps --target postgres\n            dbt --warn-error run-operation create_source_table --target postgres\n            dbt --warn-error seed --target postgres --full-refresh\n            dbt --warn-error run --target postgres\n            dbt --warn-error test --target postgres\n      - store_artifacts:\n          path: integration_tests/logs\n      - store_artifacts:\n          path: integration_tests/target\n\n    # The resource_class feature allows configuring CPU and RAM resources for each job. Different resource classes are available for different executors. https://circleci.com/docs/2.0/configuration-reference/#resourceclass\n    resource_class: large\n\n  integration-redshift:\n    docker:\n      - image: cimg/python:3.11\n    steps:\n      - checkout\n      - run: pip install dbt-core dbt-redshift\n      - run:\n          name: \"Run Tests - Redshift\"\n          command: |\n            cd integration_tests\n            dbt --warn-error deps --target redshift\n            dbt --warn-error run-operation create_source_table --target redshift\n            dbt --warn-error seed --target redshift --full-refresh\n            dbt --warn-error run --target redshift\n            dbt --warn-error test --target redshift\n      - store_artifacts:\n          path: integration_tests/logs\n      - store_artifacts:\n          path: integration_tests/target\n    # The resource_class feature allows configuring CPU and RAM resources for each job. Different resource classes are available for different executors. https://circleci.com/docs/2.0/configuration-reference/#resourceclass\n    resource_class: large\n\n  integration-snowflake:\n    docker:\n      - image: cimg/python:3.11\n    steps:\n      - checkout\n      - run: pip install dbt-core dbt-snowflake\n      - run:\n          name: \"Run Tests - Snowflake\"\n          command: |\n            cd integration_tests\n            dbt --warn-error deps --target snowflake\n            dbt --warn-error run-operation create_source_table --target snowflake\n            dbt --warn-error seed --target snowflake --full-refresh\n            dbt --warn-error run --target snowflake\n            dbt --warn-error test --target snowflake\n      - store_artifacts:\n          path: integration_tests/logs\n      - store_artifacts:\n          path: integration_tests/target\n    # The resource_class feature allows configuring CPU and RAM resources for each job. Different resource classes are available for different executors. 
https://circleci.com/docs/2.0/configuration-reference/#resourceclass\n    resource_class: large\n\n  integration-bigquery:\n    environment:\n      BIGQUERY_SERVICE_KEY_PATH: \"/home/circleci/bigquery-service-key.json\"\n    docker:\n      - image: cimg/python:3.11\n    steps:\n      - checkout\n      - run: pip install dbt-core dbt-bigquery\n      - run:\n          name: Setup Environment Variables\n          command: |\n            echo $BIGQUERY_SERVICE_ACCOUNT_JSON > $BIGQUERY_SERVICE_KEY_PATH\n            echo 'export BIGQUERY_KEYFILE_JSON=\"$BIGQUERY_SERVICE_ACCOUNT_JSON\"' >> \"$BASH_ENV\"\n      - run:\n          name: \"Run Tests - BigQuery\"\n          command: |\n            cd integration_tests\n            dbt --warn-error deps --target bigquery\n            dbt --warn-error run-operation create_source_table --target bigquery\n            dbt --warn-error seed --target bigquery --full-refresh\n            dbt --warn-error run --target bigquery\n            dbt --warn-error test --target bigquery\n      - store_artifacts:\n          path: integration_tests/logs\n      - store_artifacts:\n          path: integration_tests/target\n    # The resource_class feature allows configuring CPU and RAM resources for each job. Different resource classes are available for different executors. https://circleci.com/docs/2.0/configuration-reference/#resourceclass\n    resource_class: large\n\nworkflows:\n  version: 2\n  test-all:\n    jobs:\n      - integration-postgres:\n          context: profile-postgres\n      - integration-redshift:\n          context: profile-redshift\n          requires:\n            - integration-postgres\n      - integration-snowflake:\n          context: profile-snowflake\n          requires:\n            - integration-postgres\n      - integration-bigquery:\n          context: profile-bigquery\n          requires:\n            - integration-postgres\n"
  },
  {
    "path": ".github/CODEOWNERS",
    "content": "* @dbt-labs/dbt-package-owners\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: Bug report\nabout: Report a bug or an issue you've found with this package\ntitle: ''\nlabels: bug, triage\nassignees: ''\n\n---\n\n### Describe the bug\n<!---\nA clear and concise description of what the bug is. You can also use the issue title to do this\n--->\n\n### Steps to reproduce\n<!---\nIn as much detail as possible, please provide steps to reproduce the issue. Sample data that triggers the issue, example model code, etc is all very helpful here.\n--->\n\n### Expected results\n<!---\nA clear and concise description of what you expected to happen.\n--->\n\n### Actual results\n<!---\nA clear and concise description of what you expected to happen.\n--->\n\n### Screenshots and log output\n<!---\nIf applicable, add screenshots or log output to help explain your problem.\n--->\n\n### System information\n**The contents of your `packages.yml` file:**\n\n**Which database are you using dbt with?**\n- [ ] postgres\n- [ ] redshift\n- [ ] bigquery\n- [ ] snowflake\n- [ ] other (specify: ____________)\n\n\n**The output of `dbt --version`:**\n```\n<output goes here>\n```\n\n**The operating system you're using:**\n\n**The output of `python --version`:**\n\n### Additional context\n<!---\nAdd any other context about the problem here. For example, if you think you know which line of code is causing the issue.\n--->\n\n### Are you interested in contributing the fix?\n<!---\nLet us know if you want to contribute the fix, and whether would need a hand getting started\n--->\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: Feature request\nabout: Suggest an idea for this package\ntitle: ''\nlabels: enhancement, triage\nassignees: ''\n\n---\n\n### Describe the feature\nA clear and concise description of what you want to happen.\n\n### Describe alternatives you've considered\nA clear and concise description of any alternative solutions or features you've considered.\n\n### Additional context\nIs this feature database-specific? Which database(s) is/are relevant? Please include any other relevant context here.\n\n### Who will this benefit?\nWhat kind of use case will this feature be useful for? Please be specific and provide examples, this will help us prioritize properly.\n\n### Are you interested in contributing this feature?\n<!---\nLet us know if you want to contribute the feature, and whether would need a hand getting started\n--->\n"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "resolves #\n\n### Problem\n\n<!---\n  Describe the problem this PR is solving. What is the application state\n  before this PR is merged?\n-->\n\n### Solution\n\n<!---\n  Describe the way this PR solves the above problem. Add as much detail as you\n  can to help reviewers understand your changes. Include any alternatives and\n  tradeoffs you considered.\n-->\n\n## Checklist\n- [ ] This code is associated with an [issue](https://github.com/dbt-labs/dbt-codegen/issues) which has been triaged and [accepted for development](https://docs.getdbt.com/docs/contributing/oss-expectations#pull-requests).\n- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-codegen/blob/main/CONTRIBUTING.md) and understand what's expected of me\n- [ ] I have run this code in development and it appears to resolve the stated issue\n- [ ] This PR includes tests, or tests are not required/relevant for this PR\n- [ ] I have updated the README.md (if applicable)\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "# **what?**\n# Run tests for dbt-codegen against supported adapters\n\n# **why?**\n# To ensure that dbt-codegen works as expected with all supported adapters\n\n# **when?**\n# On every PR, and every push to main and when manually triggered\n\nname: Package Integration Tests\n\non:\n    push:\n        branches:\n            - main\n    pull_request_target:\n    workflow_dispatch:\n\njobs:\n  run-tests:\n      uses: dbt-labs/dbt-package-testing/.github/workflows/run_tox.yml@v1\n      # this just tests with postgres so no variables need to be passed through.\n      # When it's time to add more adapters you will need to pass through inputs for\n      # the other adapters as shown in the below example for redshift\n      with:\n        # snowflake\n        SNOWFLAKE_USER: ${{ vars.SNOWFLAKE_USER }}\n        SNOWFLAKE_ROLE: ${{ vars.SNOWFLAKE_ROLE }}\n        SNOWFLAKE_DATABASE: ${{ vars.SNOWFLAKE_DATABASE }}\n        SNOWFLAKE_WAREHOUSE: ${{ vars.SNOWFLAKE_WAREHOUSE }}\n        SNOWFLAKE_SCHEMA: \"integration_tests_snowflake_${{ github.run_number }}\"\n        # bigquery\n        BIGQUERY_PROJECT: ${{ vars.BIGQUERY_PROJECT }}\n        BIGQUERY_SCHEMA: \"integration_tests_bigquery_${{ github.run_number }}\"\n        # redshift\n        REDSHIFT_HOST: ${{ vars.REDSHIFT_HOST }}\n        REDSHIFT_USER: ${{ vars.REDSHIFT_USER }}\n        REDSHIFT_DATABASE: ${{ vars.REDSHIFT_DATABASE }}\n        REDSHIFT_SCHEMA: \"integration_tests_redshift_${{ github.run_number }}\"\n        REDSHIFT_PORT: ${{ vars.REDSHIFT_PORT }}\n      secrets:\n        SNOWFLAKE_ACCOUNT: ${{ secrets.SNOWFLAKE_ACCOUNT }}\n        DBT_ENV_SECRET_SNOWFLAKE_PASS: ${{ secrets.SNOWFLAKE_PASS }}\n        DBT_ENV_SECRET_REDSHIFT_PASS: ${{ secrets.REDSHIFT_PASS }}\n        BIGQUERY_KEYFILE_JSON: ${{ secrets.BIGQUERY_KEYFILE_JSON }}\n"
  },
  {
    "path": ".github/workflows/stale.yml",
    "content": "# **what?**\n# For issues that have been open for awhile without activity, label\n# them as stale with a warning that they will be closed out. If\n# anyone comments to keep the issue open, it will automatically\n# remove the stale label and keep it open.\n\n# Stale label rules:\n# awaiting_response, more_information_needed -> 90 days\n# good_first_issue, help_wanted -> 360 days (a year)\n# tech_debt -> 720 (2 years)\n# all else defaults -> 180 days (6 months)\n\n# **why?**\n# To keep the repo in a clean state from issues that aren't relevant anymore\n\n# **when?**\n# Once a day\n\nname: \"Close stale issues and PRs\"\non:\n  schedule:\n    - cron: \"30 1 * * *\"\n\npermissions:\n  issues: write\n  pull-requests: write\n\njobs:\n  stale:\n    uses: dbt-labs/actions/.github/workflows/stale-bot-matrix.yml@main\n"
  },
  {
    "path": ".github/workflows/triage-labels.yml",
    "content": "# **what?**\n# When the maintenance team triages, we sometimes need more information from the issue creator.  In\n# those cases we remove the `triage` label and add the `awaiting_response` label.  Once we\n# recieve a response in the form of a comment, we want the `awaiting_response` label removed\n# in favor of the `triage` label so we are aware that the issue needs action.\n\n# **why?**\n# To help with out team triage issue tracking\n\n# **when?**\n# This will run when a comment is added to an issue and that issue has the `awaiting_response` label.\n\nname: Update Triage Label\n\non: issue_comment\n\ndefaults:\n  run:\n    shell: bash\n\npermissions:\n  issues: write\n\njobs:\n  triage_label:\n    if: contains(github.event.issue.labels.*.name, 'awaiting_response')\n    uses: dbt-labs/actions/.github/workflows/swap-labels.yml@main\n    with:\n      add_label: \"triage\"\n      remove_label: \"awaiting_response\"\n    secrets: inherit\n"
  },
  {
    "path": ".gitignore",
    "content": "target/\ndbt_modules/\ndbt_packages/\nlogs/\nenv*/\n.venv/\n.env/\nvenv/\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# dbt-codegen v0.13.1\n\n## What's Changed\n\n## Under the hood\n\n* Temporarily remove CI test for case-sensitive identifiers when generating sources by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/230\n\n**Full Changelog**: https://github.com/dbt-labs/dbt-codegen/compare/0.13.0...0.13.1\n\n# dbt-codegen v0.13.0\n\n## What's Changed\n\n### Features\n\n* Read upstream descriptions from sources by @esegal in https://github.com/dbt-labs/dbt-codegen/pull/154\n* Parameters in `generate_source` for case-sensitive identifiers by @pnadolny13 in https://github.com/dbt-labs/dbt-codegen/pull/168\n\n### Fixes\n\n* Escape upstream descriptions in generate_model_yaml by @wircho in https://github.com/dbt-labs/dbt-codegen/pull/159\n* Fix quoted identifiers in the `generate_base_model` macro for BigQuery by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/199\n\n### Docs\n\n* fix generate_source example by @yatsky in https://github.com/dbt-labs/dbt-codegen/pull/164\n* Improve developer README by @gwenwindflower in https://github.com/dbt-labs/dbt-codegen/pull/163\n* Fix bad spacing in dev README by @gwenwindflower in https://github.com/dbt-labs/dbt-codegen/pull/170\n* Changelogs for 0.12.0, 0.12.1, and 0.13.0-b1 by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/196\n\n## Under the hood\n\n* Restore CI test for case-sensitive identifiers when generating sources by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/192\n* Remove Redshift-specific logic for toggling case-sensitive identifiers by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/208\n* Use the `cimg/postgres` Docker image by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/214\n* Independent CircleCI workflow job for each tested adapter by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/215\n* Simplify environment variables for BigQuery in CircleCI by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/216\n* Stop installing prereleases from PyPI in favor of stable releases only by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/220\n* Upgrade to Python 3.11 in CircleCI by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/222\n* Use dynamic schema names rather than hardcoded ones by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/224\n* Add support for postgres testing in GitHub CI via tox by @emmyoop by @emmyoop in https://github.com/dbt-labs/dbt-codegen/pull/181\n* Add support for snowflake testing in GitHub CI via tox by @emmyoop in https://github.com/dbt-labs/dbt-codegen/pull/198\n* Add support for redshift testing in GitHub CI via tox by @emmyoop in https://github.com/dbt-labs/dbt-codegen/pull/204\n* Add support for bigquery testing in GitHub CI via tox by @emmyoop in https://github.com/dbt-labs/dbt-codegen/pull/203\n\n## New Contributors\n* @wircho made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/159\n* @esegal made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/154\n* @yatsky made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/164\n* @gwenwindflower made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/163\n* @pnadolny13 made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/168\n* @emmyoop made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/181\n\n**Full Changelog**: https://github.com/dbt-labs/dbt-codegen/compare/0.12.1...0.13.0\n\n# dbt-codegen 
v0.13.0-b1\n\n## What's Changed\n\n### Features\n\n* Read upstream descriptions from sources by @esegal in https://github.com/dbt-labs/dbt-codegen/pull/154\n* Case sensitive generate source by @pnadolny13 in https://github.com/dbt-labs/dbt-codegen/pull/168\n\n### Fixes\n\n* Escape upstream descriptions in generate_model_yaml by @wircho in https://github.com/dbt-labs/dbt-codegen/pull/159\n\n### Docs\n\n* fix generate_source example by @yatsky in https://github.com/dbt-labs/dbt-codegen/pull/164\n* Improve developer README by @gwenwindflower in https://github.com/dbt-labs/dbt-codegen/pull/163\n* Fix bad spacing in dev README by @gwenwindflower in https://github.com/dbt-labs/dbt-codegen/pull/170\n* Update Changelog by @gwenwindflower in https://github.com/dbt-labs/dbt-codegen/pull/174\n\n## New Contributors\n\n- @wircho made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/159\n- @yatsky made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/164\n- @pnadolny13 made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/168\n- @esegal made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/154\n- @gwenwindflower made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/163\n\n**Full Changelog**: https://github.com/dbt-labs/dbt-codegen/compare/0.12.1...v0.13.0-b1\n\n# dbt-codegen v0.12.1\n\n## What's Changed\n* Add dispatch to macros by @jeremyyeo in https://github.com/dbt-labs/dbt-codegen/pull/148\n* Remove terminal output in the generated file. by @vijmen in https://github.com/dbt-labs/dbt-codegen/pull/149\n\n## New Contributors\n* @jeremyyeo made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/148\n* @vijmen made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/149\n\n**Full Changelog**: https://github.com/dbt-labs/dbt-codegen/compare/0.12.0...0.12.1\n\n# dbt-codegen v0.12.0\n\n## What's Changed\n* Use print for outputting codegen by @JorgenG in https://github.com/dbt-labs/dbt-codegen/pull/86\n\n## New Contributors\n* @JorgenG made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/86\n\n**Full Changelog**: https://github.com/dbt-labs/dbt-codegen/compare/0.11.0...0.12.0\n\n# dbt-codegen v0.11.0\n\n## 🚨 Breaking change\n\n`include_data_types` parameter added to `generate_model_yaml` and behavior changed for `generate_source`. Both default to `true`\nand are lowercase to align with the dbt style guide. Scale & precision are **not** included. Previous logic for `generate_source` defaulted to `false` and the resulting data types were uppercase and included scale & precision ([#122](https://github.com/dbt-labs/dbt-codegen/pull/122)).\n\n[Dispatch](https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch) can be used to utilize the column data type formatting of previous versions. 
Namely, by adding this macro to your project:\n\n```sql\n{% macro default__data_type_format_source(column) %}\n    {{ return(column.data_type | upper) }}\n{% endmacro %}\n```\n\nAnd then adding this within `dbt_project.yml`:\n\n```yaml\ndispatch:\n  - macro_namespace: codegen\n    search_order: [\"my_project\", \"codegen\"]\n```\n\n## What's Changed\n\n- GitHub Action to add/remove triage labels as-needed by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/133\n- GitHub Action to close issues as stale as-needed by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/134\n- Update README.md by @cohms in https://github.com/dbt-labs/dbt-codegen/pull/129\n- Remove hard-coded values for database and schema by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/139\n- Instructions for the release process by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/137\n- Add `include_data_types` argument to `generate_model_yaml` macro by @linbug in https://github.com/dbt-labs/dbt-codegen/pull/122\n\n## New Contributors\n\n- @cohms made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/129\n- @linbug made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/122\n\n**Full Changelog**: https://github.com/dbt-labs/dbt-codegen/compare/0.10.0...0.11.0\n\n# dbt-codegen v0.10.0\n\n## What's Changed\n\n- added comments to verbose regex in generate_model_import_ctes by @graciegoheen in https://github.com/dbt-labs/dbt-codegen/pull/93\n- Feature/hackathon model generator by @fivetran-joemarkiewicz in https://github.com/dbt-labs/dbt-codegen/pull/83\n- Suggestion to include packages.yml example in README.md by @Maayan-s in https://github.com/dbt-labs/dbt-codegen/pull/77\n- Add include_data_types flag to generate_source macro by @GSokol in https://github.com/dbt-labs/dbt-codegen/pull/76\n- Expected result of nested struct in BigQuery by @dbeatty10 in https://github.com/dbt-labs/dbt-codegen/pull/105\n- issue106/get_models helper macro by @erkanncelen in https://github.com/dbt-labs/dbt-codegen/pull/115\n- Feat/generate sources add database and schema by @jeremyholtzman in https://github.com/dbt-labs/dbt-codegen/pull/124\n\n## New Contributors\n\n- @fivetran-joemarkiewicz made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/83\n- @Maayan-s made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/77\n- @GSokol made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/76\n- @erkanncelen made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/115\n- @jeremyholtzman made their first contribution in https://github.com/dbt-labs/dbt-codegen/pull/124\n\n**Full Changelog**: https://github.com/dbt-labs/dbt-codegen/compare/0.9.0...0.10.0\n\n# dbt-codegen v0.9.0\n\n# dbt-codegen v0.8.1\n\n# dbt-codegen v0.8.0\n\n# Unreleased\n\n## Breaking changes\n\n## New features\n\n## Quality of life\n\n- Now uses `print` instead of `log` to output the generated text into the console. This enables you to invoke dbt with the `--quiet` flag and directly pipe the codegen output into a new file, ending up with valid yaml\n\n## Under the hood\n\n## Contributors:\n\n- [@JorgenG](https://github.com/JorgenG) (#86)\n\n# dbt-codegen v0.7.0\n\n## 🚨 Breaking change\n\n- Add support for including description placeholders for the source and table, which changes the behavior of `generate_source` when `include_descriptions` is set to `True`. 
Previous logic only created description placeholders for the columns ([#64](https://github.com/dbt-labs/dbt-codegen/issues/64), [#66](https://github.com/dbt-labs/dbt-codegen/pull/66))\n\n## New features\n\n- Add optional `table_names` arg to `generate_source` ([#50](https://github.com/dbt-labs/dbt-codegen/issues/50), [#51](https://github.com/dbt-labs/dbt-codegen/pull/51))\n- Add support for importing descriptions from columns with the same names in upstream models. It is available by setting the parameter `upstream_descriptions` to `True` in `generate_model_yaml` ([#61](https://github.com/dbt-labs/dbt-codegen/pull/61))\n- Added `case_sensitive_cols` argument to `generate_base_model` macro ([#63](https://github.com/dbt-labs/dbt-codegen/pull/63))\n- Add optional `name` arg to `generate_source` ([#64](https://github.com/dbt-labs/dbt-codegen/issues/64), [#66](https://github.com/dbt-labs/dbt-codegen/pull/66))\n\n## Fixes\n\n- `generate_model_yaml` now correctly handles nested `STRUCT` fields in BigQuery ([#27](https://github.com/dbt-labs/dbt-codegen/issues/27), [#54](https://github.com/dbt-labs/dbt-codegen/pull/54))\n\n## Contributors:\n\n- [@rahulj51](https://github.com/rahulj51) (#51)\n- [@bodschut](https://github.com/bodschut) (#54)\n- [@b-per](https://github.com/b-per) (#61)\n- [@graciegoheen](https://github.com/graciegoheen) (#63)\n- [@kbrock91](https://github.com/kbrock91) (#66)\n\n# dbt-codegen v0.6.0\n\nThis release creates breaking changes to the `generate_source.sql` macro.\n\n## Features\n\n- add optional `table_pattern` argument to `generate_source.sql` macro. Default value is '%' to pull all tables in the raw data schema to preserve existing behavior if the `table_pattern` argument is not specified by the user.\n\n# dbt-codegen v0.5.0\n\nThis release supports any version (minor and patch) of v1, which means far less need for compatibility releases in the future.\n\n## Under the hood\n\n- Change `require-dbt-version` to `[\">=1.0.0\", \"<2.0.0\"]`\n- Bump dbt-utils dependency\n- Replace `source-paths` and `data-paths` with `model-paths` and `seed-paths` respectively\n- Rename `data` and `analysis` directories to `seeds` and `analyses` respectively\n- Replace `dbt_modules` with `dbt_packages` in `clean-targets`\n\n# dbt-codegen v0.4.1\n\n🚨 This is a compatibility release in preparation for `dbt-core` v1.0.0 (🎉). Projects using this version with `dbt-core` v1.0.x can expect to see a deprecation warning. 
This will be resolved in the next minor release.\n\n# dbt-codegen v0.4.0\n\n## Breaking changes\n\n- Requires `dbt>=0.20.0` and `dbt-utils>=0.7.0`\n- Depends on `dbt-labs/dbt_utils` (instead of `fishtown-analytics/dbt_utils`)\n\n## Features\n\n- Add optional `leading_commas` arg to `generate_base_model` (#41 @jaypeedevlin)\n- Add optional `include_descriptions` arg to `generate_source` (#40 @djbelknapdbs)\n\n## Fixes\n\n- In the `generate_source` macro, use `dbt_utils.get_relations_by_pattern` instead of `get_relations_by_prefix`, since the latter will be deprecated in the future (#42)\n\n## Under the hood\n\n- Use new adapter.dispatch syntax (#44)\n\n# dbt-codegen v0.3.2\n\nThis is a quality of life release\n\n## Other\n\n- Fix rendering issues on hub.getdbt.com\n- Fix integration tests due to python version compatibility\n\n# dbt-codegen v0.3.1\n\nThis is a bugfix release\n\n## Fixes\n\n- Use latest version of dbt-utils (0.6.2) to ensure generate_source_yaml works for non-target schemata (#34)\n\n# dbt-codegen v0.3.0\n\n## 🚨 Breaking change\n\nThis release requires dbt v0.18.0, and dbt-utils v0.6.1. If you're not ready to upgrade, consider using a previous release of this package.\n\n## Quality of life\n\n- Use dbt v0.18.0 (#31)\n- Fix README rendering on hub (#32 @calvingiles)\n\n# dbt-codegen v0.2.0\n\n## 🚨 Breaking change\n\nThe lower bound of `dbt-utils` is now `0.4.0`.\n\nThis won't affect most users, since you're likely already using a version of dbt-utils higher than this to achieve 0.17.0 compatibility.\n\n## Quality of life:\n\n- Change dbt-utils dependencies to `[>=0.4.0, <0.6.0]` (#29)\n- Fix tests (#29)\n\n# dbt-codegen v0.1.0\n\n## 🚨 Breaking change!\n\nThis package now requires dbt v0.17.x!\n\n## Features:\n\n- Add `generate_model_yaml` (#18 @jtalmi)\n\n## Under the hood:\n\n- Update to v0.17.0, including `dbt_project.yml` version 2 syntax (#23)\n- Add GitHub templates and installation instructions (#23)\n\n## Acknowledgements\n\n@marzaccaro made a PR for `generate_model_yaml`, and, although I had reviewed it, I let the PR go stale and somehow completely forgot about it when merging PR #18 — this is completely my bad! So equal credit to @marzaccaro and @jtalmi for their work :clap:\n\n# dbt-codegen v0.0.4\n\nThis is a bugfix release to improve compatibility with Snowflake\n\n# dbt-codegen v0.0.3\n\nBump utils version range\n\n# dbt-codegen v0.0.2\n\nSmall quality of life improvements\n\n# dbt-codegen v0.0.1\n\nInitial release\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing to `dbt-codegen`\n\n`dbt-codegen` is open source software. It is what it is today because community members have opened issues, provided feedback, and [contributed to the knowledge loop](https://www.getdbt.com/dbt-labs/values/). Whether you are a seasoned open source contributor or a first-time committer, we welcome and encourage you to contribute code, documentation, ideas, or problem statements to this project.\n\nRemember: all PRs (apart from cosmetic fixes like typos) should be [associated with an issue](https://docs.getdbt.com/docs/contributing/oss-expectations#pull-requests).\n\n1. [About this document](#about-this-document)\n1. [Getting the code](#getting-the-code)\n1. [Setting up an environment](#setting-up-an-environment)\n1. [Implementation guidelines](#implementation-guidelines)\n1. [Testing dbt-codegen](#testing)\n1. [Adding CHANGELOG Entry](#adding-changelog-entry)\n1. [Submitting a Pull Request](#submitting-a-pull-request)\n\n## About this document\n\nThere are many ways to contribute to the ongoing development of `dbt-codegen`, such as by participating in discussions and issues. We encourage you to first read our higher-level document: [\"Expectations for Open Source Contributors\"](https://docs.getdbt.com/docs/contributing/oss-expectations).\n\nThe rest of this document serves as a more granular guide for contributing code changes to `dbt-codegen` (this repository). It is not intended as a guide for using `dbt-codegen`, and some pieces assume a level of familiarity with Python development (virtualenvs, `pip`, etc). Specific code snippets in this guide assume you are using macOS or Linux and are comfortable with the command line.\n\n### Notes\n\n- **CLA:** Please note that anyone contributing code to `dbt-codegen` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements). If you are unable to sign the CLA, the `dbt-codegen` maintainers will unfortunately be unable to merge any of your Pull Requests. We welcome you to participate in discussions, open issues, and comment on existing ones.\n- **Branches:** All pull requests from community contributors should target the `main` branch (default). If the change is needed as a patch for a version of `dbt-codegen` that has already been released (or is already a release candidate), a maintainer will backport the changes in your PR to the relevant branch.\n\n## Getting the code\n\n### Installing git\n\nYou will need `git` in order to download and modify the `dbt-codegen` source code. On macOS, the best way to download git is to just install [Xcode](https://developer.apple.com/support/xcode/).\n\n### External contributors\n\nIf you are not a member of the `dbt-labs` GitHub organization, you can contribute to `dbt-codegen` by forking the `dbt-codegen` repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to:\n\n1. Fork the `dbt-codegen` repository\n2. Clone your fork locally\n3. Check out a new branch for your proposed changes\n4. Push changes to your fork\n5. Open a pull request against `dbt-labs/dbt-codegen` from your forked repository\n\n### dbt Labs contributors\n\nIf you are a member of the `dbt-labs` GitHub organization, you will have push access to the `dbt-codegen` repo. 
Rather than forking `dbt-codegen` to make your changes, just clone the repository, check out a new branch, and push directly to that branch.\n\n## Setting up an environment\n\nThere are some tools that will be helpful to you in developing locally. While this is the list relevant for `dbt-codegen` development, many of these tools are used commonly across open-source python projects.\n\n### Tools\n\nThese are the tools used in `dbt-codegen` development and testing:\n- [`make`](https://users.cs.duke.edu/~ola/courses/programming/Makefiles/Makefiles.html) to run multiple setup or test steps in combination. Don't worry too much; nobody _really_ understands how `make` works, and our Makefile aims to be super simple.\n- [CircleCI](https://circleci.com/) for automating tests and checks, once a PR is pushed to the `dbt-codegen` repository\n\nA deep understanding of these tools is not required to effectively contribute to `dbt-codegen`, but we recommend checking out the attached documentation if you're interested in learning more about each one.\n\n## Testing\n\nOnce you're able to manually test that your code change is working as expected, it's important to run existing automated tests, as well as add some new ones. These tests will ensure that:\n- Your code changes do not unexpectedly break other established functionality\n- Your code changes can handle all known edge cases\n- The functionality you're adding will _keep_ working in the future\n\nSee here for details on running existing integration tests and adding new ones:\n- [integration_tests/README.md](integration_tests/README.md)\n\n## Adding CHANGELOG Entry\n\nWe use [automatically generated release notes](https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes) to generate `CHANGELOG` entries. **Note:** Do not edit the `CHANGELOG.md` directly. Your modifications will be lost.\n\nYou don't need to worry about which `dbt-codegen` version your change will go into, and you don't need to add a changelog entry yourself. Just open your PR against the `main` branch. All merged changes will be included in the next minor version of `dbt-codegen`. The maintainers _may_ choose to \"backport\" specific changes in order to patch older minor versions. In that case, a maintainer will take care of that backport after merging your PR, before releasing the new version of `dbt-codegen`.\n\n## Submitting a Pull Request\n\nA `dbt-codegen` maintainer will review your PR. They may suggest code revisions for style or clarity, or request that you add unit or integration test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.\n\nAutomated tests run via CircleCI. If you're a first-time contributor, all tests (including code checks and unit tests) will require a maintainer to approve. Changes in the `dbt-codegen` repository trigger integration tests.\n\nOnce all tests are passing and your PR has been approved, a `dbt-codegen` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "Makefile",
    "content": ".DEFAULT_GOAL:=help\n\n.PHONY: test\ntest: ## Run the integration tests.\n\t@./run_test.sh $(target)\n\n.PHONY: test_tox\ntest: ## Run the integration tests with tox\n\t@\\\n\ttox -e dbt_integration_$(target)\n\n.PHONY: dev\ndev: ## Installs dbt-* packages in develop mode along with development dependencies.\n\t@\\\n\techo \"Install dbt-$(target)...\"; \\\n\tpython -m pip install --upgrade pip setuptools; \\\n\tpython -m pip install dbt-core \"dbt-$(target)\";\n\n.PHONY: setup-db\nsetup-db: ## Setup Postgres database with docker-compose for system testing.\n\t@\\\n\tdocker-compose up --detach postgres\n\n.PHONY: help\nhelp: ## Show this help message.\n\t@echo 'usage: make [target]'\n\t@echo\n\t@echo 'targets:'\n\t@grep -E '^[8+a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n"
  },
  {
    "path": "README.md",
    "content": "# dbt-codegen\n\nMacros that generate dbt code, and log it to the command line.\n\n# Contents\n\n- [dbt-codegen](#dbt-codegen)\n- [Contents](#contents)\n- [Installation instructions](#installation-instructions)\n- [Macros](#macros)\n  - [generate_source (source)](#generate_source-source)\n    - [Arguments](#arguments)\n    - [Usage:](#usage)\n  - [generate_base_model (source)](#generate_base_model-source)\n    - [Arguments:](#arguments-1)\n    - [Usage:](#usage-1)\n  - [create_base_models (source)](#create_base_models-source)\n    - [Arguments:](#arguments-2)\n    - [Usage:](#usage-2)\n  - [base_model_creation (source)](#base_model_creation-source)\n    - [Arguments:](#arguments-3)\n    - [Usage:](#usage-3)\n  - [generate_model_yaml (source)](#generate_model_yaml-source)\n    - [Arguments:](#arguments-4)\n    - [Usage:](#usage-4)\n  - [generate_model_import_ctes (source)](#generate_model_import_ctes-source)\n    - [Arguments:](#arguments-5)\n    - [Usage:](#usage-5)\n  - [generate_unit_test_template (source)](#generate_unit_test_template-source)\n    - [Arguments:](#arguments-6)\n    - [Usage:](#usage-6)\n- [Contributing](#contributing)\n\n# Installation instructions\n\nNew to dbt packages? Read more about them [here](https://docs.getdbt.com/docs/building-a-dbt-project/package-management/).\n\n1. Include this package in your `packages.yml` file — check [here](https://hub.getdbt.com/dbt-labs/codegen/latest/) for the latest version number:\n\n```yml\npackages:\n  - package: dbt-labs/codegen\n    version: X.X.X ## update to latest version here\n```\n\n2. Run `dbt deps` to install the package.\n\n# Macros\n\n## generate_source ([source](macros/generate_source.sql))\n\nThis macro generates lightweight YAML for a [Source](https://docs.getdbt.com/docs/using-sources),\nwhich you can then paste into a schema file.\n\n### Arguments\n\n- `schema_name` (required): The schema name that contains your source data\n- `database_name` (optional, default=target.database): The database that your\n  source data is in.\n- `table_names` (optional, default=none): A list of tables that you want to generate the source definitions for.\n- `generate_columns` (optional, default=False): Whether you want to add the\n  column names to your source definition.\n- `include_descriptions` (optional, default=False): Whether you want to add\n  description placeholders to your source definition.\n- `include_data_types` (optional, default=True): Whether you want to add data\n  types to your source columns definitions.\n- `table_pattern` (optional, default='%'): A table prefix / postfix that you\n  want to subselect from all available tables within a given schema.\n- `exclude` (optional, default=''): A string you want to exclude from the selection criteria\n- `name` (optional, default=schema_name): The name of your source\n- `include_database` (optional, default=False): Whether you want to add\n  the database to your source definition\n- `include_schema` (optional, default=False): Whether you want to add\n  the schema to your source definition\n- `case_sensitive_databases` (optional, default=False): Whether you want database names to be\n  in lowercase, or to match the case in the source table — not compatible with Redshift\n- `case_sensitive_schemas` (optional, default=False): Whether you want schema names to be\n  in lowercase, or to match the case in the source table — not compatible with Redshift\n- `case_sensitive_tables` (optional, default=False): Whether you want table names to be\n  in lowercase, or to 
match the case in the source table — not compatible with Redshift\n- `case_sensitive_cols` (optional, default=False): Whether you want column names to be\n  in lowercase, or to match the case in the source table\n\n### Outputting to a file\n\nIf you use the `dbt run-operation` approach, you can output directly to a file by piping the output into a new file and using the `--quiet` CLI flag:\n\n```\ndbt --quiet run-operation generate_source --args '{\"table_names\": [\"orders\"]}' > models/staging/jaffle_shop/_sources.yml\n```\n\n### Usage:\n\n1. Copy the macro into a statement tab in the dbt Cloud IDE, or into an analysis file, and compile your code\n\n```\n{{ codegen.generate_source('raw_jaffle_shop') }}\n```\n\nor for multiple arguments\n\n```\n{{ codegen.generate_source(schema_name='jaffle_shop', database_name='raw') }}\n```\n\nAlternatively, call the macro as an [operation](https://docs.getdbt.com/docs/using-operations):\n\n```\n$ dbt run-operation generate_source --args 'schema_name: raw_jaffle_shop'\n```\n\nor\n\n```\n# for multiple arguments, use the dict syntax\n$ dbt run-operation generate_source --args '{\"schema_name\": \"jaffle_shop\", \"database_name\": \"raw\", \"table_names\":[\"table_1\", \"table_2\"]}'\n```\n\nor if you want to include column names and data types:\n\n```\n$ dbt run-operation generate_source --args '{\"schema_name\": \"jaffle_shop\", \"generate_columns\": true}'\n```\n\nor if you want to include column names without data types (the behavior of dbt-codegen <= v0.9.0):\n\n```\n$ dbt run-operation generate_source --args '{\"schema_name\": \"jaffle_shop\", \"generate_columns\": true, \"include_data_types\": false}'\n```\n\n2. The YAML for the source will be logged to the command line\n\n```\nversion: 2\n\nsources:\n  - name: raw_jaffle_shop\n    database: raw\n    schema: raw_jaffle_shop\n    tables:\n      - name: customers\n        description: \"\"\n      - name: orders\n        description: \"\"\n      - name: payments\n        description: \"\"\n```\n\n3. Paste the output into a schema `.yml` file, and refactor as required.\n\n## generate_base_model ([source](macros/generate_base_model.sql))\n\nThis macro generates the SQL for a base model, which you can then paste into a\nmodel.\n\n### Arguments:\n\n- `source_name` (required): The source you wish to generate base model SQL for.\n- `table_name` (required): The source table you wish to generate base model SQL for.\n- `leading_commas` (optional, default=False): Whether you want your commas to be leading (vs trailing).\n- `case_sensitive_cols` (optional, default=False): Whether your source table has case-sensitive column names. If true, keeps the case of the column names from the source.\n- `materialized` (optional, default=None): Set materialization style (e.g. table, view, incremental) inside the model's `config` block. If not set, materialization style will be controlled by `dbt_project.yml`.\n\n### Usage:\n\n1. Create a source for the table you wish to create a base model on top of.\n2. Copy the macro into a statement tab in the dbt Cloud IDE, or into an analysis file, and compile your code\n\n```\n{{ codegen.generate_base_model(\n    source_name='raw_jaffle_shop',\n    table_name='customers',\n    materialized='table'\n) }}\n```\n\nAlternatively, call the macro as an [operation](https://docs.getdbt.com/docs/using-operations):\n\n```\n$ dbt run-operation generate_base_model --args '{\"source_name\": \"raw_jaffle_shop\", \"table_name\": \"customers\"}'\n```\n\n
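or, to combine the optional arguments documented above (an illustrative call; the argument values here are examples only):\n\n```\n$ dbt run-operation generate_base_model --args '{\"source_name\": \"raw_jaffle_shop\", \"table_name\": \"customers\", \"leading_commas\": true, \"case_sensitive_cols\": true}'\n```\n\n3. 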
The SQL for a base model will be logged to the command line\n\n```\nwith source as (\n\n    select * from {{ source('raw_jaffle_shop', 'customers') }}\n\n),\n\nrenamed as (\n\n    select\n        id,\n        first_name,\n        last_name,\n        email,\n        _elt_updated_at\n\n    from source\n\n)\n\nselect * from renamed\n```\n\n4. Paste the output into a model, and refactor as required.\n\n## create_base_models ([source](macros/create_base_models.sql))\n\nThis macro generates a series of terminal commands (chained with `&&` so they run in sequence) that execute the [base_model_creation](#base_model_creation-source) bash script. This bash script will write the output of the [generate_base_model](#generate_base_model-source) macro into a new model file in your local dbt project.\n\n> **Note**: This macro is not compatible with the dbt Cloud IDE.\n\n### Arguments:\n\n- `source_name` (required): The source you wish to generate base model SQL for.\n- `tables` (required): A list of all tables you want to generate the base models for.\n\n### Usage:\n\n1. Create a source for the table you wish to create a base model on top of.\n2. From the command line in your local environment, run:\n\n```bash\ndbt run-operation codegen.create_base_models --args '{source_name: my-source, tables: [\"this-table\",\"that-table\"]}'\n```\n\n## base_model_creation ([source](bash_scripts/base_model_creation.sh))\n\nWhen executed from your local environment, this bash script creates model files in your dbt project that contain the outputs of the [generate_base_model](macros/generate_base_model.sql) macro.\n\n> **Note**: This script is not compatible with the dbt Cloud IDE.\n\n### Arguments:\n\n- `source_name` (required): The source you wish to generate base model SQL for.\n- `table_name` (required): A single table name for which you want to generate the base model.\n\n### Usage:\n\n1. Create a source for the table you wish to create a base model on top of.\n2. From the command line in your local environment, run:\n\n```bash\nsource dbt_packages/codegen/bash_scripts/base_model_creation.sh \"source_name\" \"table_name\"\n```\n\n## generate_model_yaml ([source](macros/generate_model_yaml.sql))\n\nThis macro generates the YAML for a list of model(s), which you can then paste into a\nschema.yml file.\n\n### Arguments:\n\n- `model_names` (required): The model(s) you wish to generate YAML for.\n- `upstream_descriptions` (optional, default=False): Whether you want to include descriptions for identical column names from upstream models and sources.\n- `include_data_types` (optional, default=True): Whether you want to add data types to your model column definitions.\n\n### Usage:\n\n1. Create a model.\n2. Copy the macro into a statement tab in the dbt Cloud IDE, or into an analysis file, and compile your code\n\n```\n{{ codegen.generate_model_yaml(\n    model_names=['customers']\n) }}\n```\n\nYou can use the helper function `codegen.get_models` and specify a directory and/or prefix to get a list of all matching models, to be passed into the `model_names` list.\n\n```\n{% set models_to_generate = codegen.get_models(directory='marts', prefix='fct_') %}\n{{ codegen.generate_model_yaml(\n    model_names = models_to_generate\n) }}\n```\n\nAlternatively, call the macro as an [operation](https://docs.getdbt.com/docs/using-operations):\n\n```\n$ dbt run-operation generate_model_yaml --args '{\"model_names\": [\"customers\"]}'\n```\n\n
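or, to also copy descriptions from identically named columns in upstream models and sources (an illustrative call combining the optional arguments above):\n\n```\n$ dbt run-operation generate_model_yaml --args '{\"model_names\": [\"customers\"], \"upstream_descriptions\": true}'\n```\n\n3. 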
The YAML for the model(s) will be logged to the command line\n\n```\nversion: 2\n\nmodels:\n  - name: customers\n    description: \"\"\n    columns:\n      - name: customer_id\n        data_type: integer\n        description: \"\"\n      - name: customer_name\n        data_type: text\n        description: \"\"\n```\n\n4. Paste the output into a schema.yml file, and refactor as required.\n\n## generate_model_import_ctes ([source](macros/generate_model_import_ctes.sql))\n\nThis macro generates the SQL for a given model with all references pulled up into import CTEs, which you can then paste back into the model.\n\n### Arguments:\n\n- `model_name` (required): The model you wish to generate SQL with import CTEs for.\n- `leading_commas` (optional, default=False): Whether you want your commas to be leading (vs trailing).\n\n### Usage:\n\n1. Create a model with your original SQL query\n2. Copy the macro into a statement tab in the dbt Cloud IDE, or into an analysis file, and compile your code\n\n```\n{{ codegen.generate_model_import_ctes(\n    model_name = 'my_dbt_model'\n) }}\n```\n\nAlternatively, call the macro as an [operation](https://docs.getdbt.com/docs/using-operations):\n\n```\n$ dbt run-operation generate_model_import_ctes --args '{\"model_name\": \"my_dbt_model\"}'\n```\n\n3. The new SQL - with all references pulled up into import CTEs - will be logged to the command line\n\n```\nwith customers as (\n\n    select * from {{ ref('stg_customers') }}\n\n),\n\norders as (\n\n    select * from {{ ref('stg_orders') }}\n\n),\n\npayments as (\n\n    select * from {{ ref('stg_payments') }}\n\n),\n\ncustomer_orders as (\n\n    select\n        customer_id,\n        min(order_date) as first_order,\n        max(order_date) as most_recent_order,\n        count(order_id) as number_of_orders\n    from orders\n    group by customer_id\n\n),\n\ncustomer_payments as (\n\n    select\n        orders.customer_id,\n        sum(amount) as total_amount\n    from payments\n    left join orders on\n         payments.order_id = orders.order_id\n    group by orders.customer_id\n\n),\n\nfinal as (\n\n    select\n        customers.customer_id,\n        customers.first_name,\n        customers.last_name,\n        customer_orders.first_order,\n        customer_orders.most_recent_order,\n        customer_orders.number_of_orders,\n        customer_payments.total_amount as customer_lifetime_value\n    from customers\n    left join customer_orders\n        on customers.customer_id = customer_orders.customer_id\n    left join customer_payments\n        on  customers.customer_id = customer_payments.customer_id\n\n)\n\nselect * from final\n```\n\n4. Replace the contents of the model's current SQL file with the compiled or logged code\n\n## generate_unit_test_template ([source](macros/generate_unit_test_template.sql))\n\nThis macro generates the unit testing YAML for a given model with all references included as `given` inputs (along with their columns), plus the columns within the expected output.\n\n### Arguments:\n\n- `model_name` (required): The model you wish to generate unit testing YAML for.\n- `inline_columns` (optional, default=False): Whether you want all columns on the same line.\n\n### Usage:\n\n1. Create a model with your original SQL query\n2. Call the macro as an [operation](https://docs.getdbt.com/docs/using-operations):\n\n```\n$ dbt run-operation generate_unit_test_template --args '{\"model_name\": \"order_items\"}'\n```\n\n
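or, with `inline_columns: true` to put each row's columns on a single line (`- {col_a: , col_b: }`):\n\n```\n$ dbt run-operation generate_unit_test_template --args '{\"model_name\": \"order_items\", \"inline_columns\": true}'\n```\n\n3. 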
The new YAML - with all given inputs included - will be logged to the command line\n\n```yaml\nunit_tests:\n  - name: unit_test_order_items\n    model: order_items\n\n    given:\n      - input: ref(\"stg_order_items\")\n        rows:\n          - col_a: \n            col_b: \n\n    expect:\n      rows:\n        - id: \n```\n\n4. Create a new YAML file with the compiled or logged code.\n5. Add column values for the given inputs and expected output, as in the example below.\n
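\nFor example, a filled-in version of the template might look like this (the values are purely illustrative):\n\n```yaml\nunit_tests:\n  - name: unit_test_order_items\n    model: order_items\n\n    given:\n      - input: ref(\"stg_order_items\")\n        rows:\n          - col_a: 1\n            col_b: 2\n\n    expect:\n      rows:\n        - id: 1\n```\n\n## Contributing\n\nTo contribute code to this package, please follow the steps outlined in the `integration_tests` directory's [README](https://github.com/dbt-labs/dbt-codegen/blob/main/integration_tests/README.md) file.\n"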
  },
  {
    "path": "RELEASE.md",
    "content": "# dbt-codegen releases\n\n## When do we release?\nThere's a few scenarios that might prompt a release:\n\n| Scenario                                   | Release type |\n|--------------------------------------------|--------------|\n| Breaking changes to existing macros        | minor        |\n| New functionality                          | minor        |\n| Fixes to existing macros                   | patch        |\n\n## Release process\n\n1. Begin a new release by clicking [here](https://github.com/dbt-labs/dbt-codegen/releases/new)\n1. Click \"Choose a tag\", then paste your version number (with no \"v\" in the name), then click \"Create new tag: x.y.z. on publish\"\n    - The “Release title” will be identical to the tag name\n1. Click the \"Generate release notes\" button\n1. Copy and paste the generated release notes into `CHANGELOG.md`, commit, and merge into the `main` branch\n1. Click the \"Publish release\" button\n    - This will automatically create an \"Assets\" section containing:\n        - Source code (zip)\n        - Source code (tar.gz)\n"
  },
  {
    "path": "bash_scripts/base_model_creation.sh",
    "content": "#!/bin/bash\n\necho \"\" > models/stg_$1__$2.sql\n\ndbt --quiet run-operation codegen.generate_base_model --args '{\"source_name\": \"'$1'\", \"table_name\": \"'$2'\"}' | tail -n +3 >> models/stg_$1__$2.sql\n"
  },
  {
    "path": "dbt_project.yml",
    "content": "name: \"codegen\"\nversion: \"0.5.0\"\n\nrequire-dbt-version: [\">=1.1.0\", \"<3.0.0\"]\nconfig-version: 2\n\ntarget-path: \"target\"\nclean-targets: [\"target\", \"dbt_packages\"]\nmacro-paths: [\"macros\"]\nlog-path: \"logs\"\n"
  },
  {
    "path": "docker-compose.yml",
    "content": "version: \"3.7\"\nservices:\n  postgres:\n    image: cimg/postgres:17.0\n    environment:\n      - POSTGRES_USER=root\n    ports:\n      - \"5432:5432\"\n"
  },
  {
    "path": "integration_tests/README.md",
    "content": "## Table of Contents\n\n1. [Overview](#overview)\n   1. [Prerequisites](#prerequisites)\n   2. [Introduction](#introduction)\n2. [Setup](#setup)\n   1. [Configure credentials](#configure-credentials)\n   2. [Setup Postgres or other database targets](#setup-postgres-or-other-database-targets)\n   3. [Set up virtual environment](#set-up-virtual-environment)\n   4. [Install dependencies](#install-dependencies)\n3. [Write or modify an integration test](#write-or-modify-an-integration-test)\n   1. [Run the integration tests](#run-the-integration-tests)\n   2. [Creating a new integration test](#creating-a-new-integration-test)\n4. [Implement the functionality](#implement-the-functionality)\n5. [Commit your changes and open a pull request](#commit-your-changes-and-open-a-pull-request)\n\n## Overview\n\n### Prerequisites\n\n- [python3](https://www.python.org/)\n- [make](<https://en.wikipedia.org/wiki/Make_(software)>) (Optional, but recommended for better development experience)[^1]\n- [Docker](https://www.docker.com/) (Optional, but recommended for using Postgres as your target database easily)[^2]\n\n### Introduction\n\nPackages in dbt are actually dbt projects themselves, you write SQL and Jinja, sometimes in macros, to add new functionality or models to another dbt project. As SQL and Jinja rely on input data, it's essential to have a functioning project to be able to test that the code works as expected. Constantly running the code, loading data, running bits and pieces, and hoping for the best is not a good development flow though, nor is it a reliable way to ensure that everything works. This is why our dbt packages have integration tests. These tests run all of the data loading, model building, and tests that are defined in the package inside testing environments, and check that the results are as expected.\n\nIf you add or modify functionality in any codegen macros, there should be corresponding changes to the integration tests. This README will walk you through this process. Let's outline the basic steps first:\n\n1. Set up your environment (credentials, virtual environment, dependencies, test database(s))\n2. Write or modify an integration test (you should expect this to fail as you haven't implemented the functionality yet!)\n3. Implement the functionality in the new or modified macro, and run the tests to get them to pass.\n4. Commit your changes and open a pull request.\n\n## Setup\n\n### Configure credentials\n\nYou'll need to set environment variables with the credentials to access your target database. If you're using the recommended local development path of Postgres in Docker, these values are already filled in as they are generic. For the cloud warehouses listed, you'll need real credentials. You probably want to ensure you're building into a testing schema as well to keep the output of this codegen separate from any production data. We run against all the warehouses listed in the CI (implmented via CircleCI) when you open a PR, so feel free to test against Postgres while developing, and we'll ensure the code works against all the other targets.\n\nYou can set these env vars in a couple ways:\n\n- **Temporary**: Set these environment variables in your shell before running the tests. This is the easiest way to get started, but you'll have to set them every time you open a new terminal.\n- **Reusable**: If you anticipate developing for multiple sessions, set these environment variables in your shell profile (like `~/.bashrc` or `~/.zshrc`). 
### Set up Postgres or other database targets\n\nAs mentioned, you'll need a target database to run the integration tests and develop against. You can use a cloud warehouse, but the easiest (and free) way to work is to use Postgres locally. We include a `docker-compose.yml` file that will spin up a Postgres container for you to make this easy.\n\nTo run the Postgres container, just run:\n\n```shell\nmake setup-db\n```\n\nOr, alternatively:\n\n```shell\ndocker-compose up --detach postgres\n```\n\n> [!NOTE]\n> `make` is a venerable build tool that is included in most Unix-like operating systems. It's not strictly necessary to use `make` to develop on this project, but there are several `make` commands that wrap more complex commands and make development easier. If you don't have `make` installed or don't want to use it, you can just run the commands in the `Makefile` directly. All the examples will show both options.\n\n### Set up virtual environment\n\nWe strongly recommend using virtual environments when developing code in `dbt-codegen`. We recommend creating this virtual environment in the root of the `dbt-codegen` repository. To create a new virtual environment, run:\n\n```shell\npython3 -m venv .venv\nsource .venv/bin/activate\n```\n\nThis will create and activate a new Python virtual environment.\n\n### Install dependencies\n\nFirst, make sure that you've set up your virtual environment as described above. Also ensure you have the latest versions of pip and setuptools installed:\n\n```shell\npython3 -m pip install --upgrade pip setuptools\n```\n\nNext, install `dbt-core` (and its dependencies) with:\n\n```shell\nmake dev target=[postgres|redshift|...]\n# or\npython3 -m pip install dbt-core dbt-[postgres|redshift|...]\n```\n\nOr, more specifically:\n\n```shell\nmake dev target=postgres\n# or\npython3 -m pip install dbt-core dbt-postgres\n```\n\nMake sure to reload your virtual environment after installing the dependencies:\n\n```shell\nsource .venv/bin/activate\n```\n\n## Write or modify an integration test\n\nRun all the tests _before_ you start developing to make sure everything is working as expected before you start making changes. Nothing is worse than spending a ton of time troubleshooting a failing test, only to realize it was failing before you touched anything. 
This will also ensure that you have the correct environment variables set up and that your database is running.\n\n### Run the CircleCI integration tests\n\nTo run all the integration tests on your local machine like they will get run in CI:\n\n```shell\nmake test target=[postgres|redshift|...]\n# or\n./run_test.sh [postgres|redshift|...]\n```\n\nOr, more specifically:\n\n```shell\nmake test target=postgres\n# or\n./run_test.sh postgres\n```\n\n### Run the tox integration tests\n\nTo run all the integration tests on your local machine like they will get run in CI (using GitHub workflows with tox):\n\n```shell\nmake test_tox target=postgres\n```\n\n### Creating a new integration test\n\nAdding integration tests for new functionality typically involves making one or more of the following:\n\n- a new seed file of fixture data\n- a new model file to test against\n- a new test to assert anticipated behavior\n\nOnce you've added and/or edited the necessary files, assuming you are in the sub-project in the `integration_tests` folder, you should be able to run and test your new additions specifically by running:\n\n```shell\ndbt deps --target {your_target}\ndbt build --target {your_target} --select +{your_selection_criteria}\n```\n\nThe `dbt build` command will handle seeding, running, and testing the selection in a single command. The `+` operator in the `--select` flag indicates we also want to build everything that this selection depends on.\n\nOr simply `make dev target={your_target}` and then `make test target={your_target}` if you're okay with running the entire project and all tests.\n\nRemember, typically you'll want to create a failing test _first_, then implement the functionality to make it pass. This is called \"test-driven development\" (TDD) and it's a great way to ensure that your code really does what you expect it to. For example, let's imagine you wrote a test expecting it to fail, but it passed before you even implemented your logic! That would mean the test is not actually testing what you want, and you'd need to re-evaluate your assumptions. That's something you want to catch early in the development process, and that's what TDD is all about. So, expect the tests you just added to fail until you've implemented the new logic.\n\n## Implement the functionality\n\nOkay, finally: this is the fun part! You can now implement the functionality in the macro you're working on. The development flow should be something like:\n\n1. You've got a failing test, so you know what you need to implement.\n2. Implement some logic in the macro you're working on.\n3. Run the relevant tests to see if they pass.\n4. Repeat until the tests pass.\n5. Run the full test suite to ensure you didn't break anything else by accident.\n\n## Commit your changes and open a pull request\n\nOnce your tests are passing and you're happy with the code, you'll want to commit it and open a new PR on GitHub. Don't forget to run the full test suite against your target database before you open a PR to make sure you didn't accidentally break any existing functionality. When you open a PR, CircleCI will run the same test suite against all the database targets. If they're passing, we'll triage and review the code as soon as we can! Thank you for contributing to dbt-codegen!\n\n[^1]: If you're on a Mac, `make` is probably best installed with the XCode Command Line Tools, or you can install `make` via Homebrew with `brew install make`. 
If you're on Windows, you can either use the Windows Subsystem for Linux (WSL) or use `scoop` or `chocolatey` to install `make`. If you're on Linux, you probably already have `make` installed.\n[^2]: Specific instructions on installing and getting started with Docker for your OS can be found [here](https://docs.docker.com/get-docker/).\n"
  },
  {
    "path": "integration_tests/dbt_project.yml",
    "content": "name: \"codegen_integration_tests\"\nversion: \"1.0\"\nconfig-version: 2\n\nprofile: \"integration_tests\"\n\nmodel-paths: [\"models\"]\nanalysis-paths: [\"analyses\"]\ntest-paths: [\"tests\"]\nseed-paths: [\"seeds\"]\nmacro-paths: [\"macros\"]\n\ntarget-path: \"target\"\nclean-targets:\n  - \"target\"\n  - \"dbt_packages\"\n\nflags:\n  send_anonymous_usage_stats: False\n  use_colors: True\n\nseeds:\n  +schema: raw_data\n  +quote_columns: false\n\nvars:\n  my_table_reference: table_c\n\nmodels:\n  +bind: false\n"
  },
  {
    "path": "integration_tests/macros/assert_equal.sql",
    "content": "{% macro assert_equal(actual_object, expected_object) %}\n{% if not execute %}\n\n    {# pass #}\n\n{% elif actual_object != expected_object %}\n\n    {% set msg %}\n    Expected did not match actual\n\n    -----------\n    Actual:\n    -----------\n    --->{{ actual_object }}<---\n\n    -----------\n    Expected:\n    -----------\n    --->{{ expected_object }}<---\n\n    {% endset %}\n\n    {{ log(msg, info=True) }}\n\n    select 'fail'\n\n{% else %}\n\n    select 'ok' limit 0\n\n{% endif %}\n{% endmacro %}\n"
  },
  {
    "path": "integration_tests/macros/integer_type_value.sql",
    "content": "{%- macro integer_type_value() -%}\n{%- if target.type == \"snowflake\" -%}\nnumber\n{%- elif target.type == \"bigquery\" -%}\nint64\n{%- else -%}\ninteger\n{%- endif -%}\n{%- endmacro -%}\n"
  },
  {
    "path": "integration_tests/macros/operations/create_source_table.sql",
    "content": "{% macro create_source_table() %}\n\n{% set target_schema=api.Relation.create(\n    database=target.database,\n    schema=target.schema ~ \"__data_source_schema\"\n) %}\n\n\n{% do adapter.create_schema(target_schema) %}\n\n{% set drop_table_sql %}\ndrop table if exists {{ target_schema }}.codegen_integration_tests__data_source_table {% if target.type == \"redshift\" %}cascade{% endif %}\n{% endset %}\n\n{{ run_query(drop_table_sql) }}\n\n\n{% set create_table_sql %}\ncreate table {{ target_schema }}.codegen_integration_tests__data_source_table as (\n    select\n        1 as my_integer_col,\n        true as my_bool_col\n)\n{% endset %}\n\n{{ run_query(create_table_sql) }}\n\n{% set drop_table_sql_case_sensitive %}\ndrop table if exists {{ target_schema }}.codegen_integration_tests__data_source_table_case_sensitive {% if target.type == \"redshift\" %}cascade{% endif %}\n{% endset %}\n\n{{ run_query(drop_table_sql_case_sensitive) }}\n\n{% set create_table_sql_case_sensitive %}\ncreate table {{ target_schema }}.codegen_integration_tests__data_source_table_case_sensitive as (\n    select\n        1 as {{ adapter.quote(\"My_Integer_Col\") }},\n        true as {{ adapter.quote(\"My_Bool_Col\") }}\n)\n{% endset %}\n\n{{ run_query(create_table_sql_case_sensitive) }}\n\n{% endmacro %}\n"
  },
  {
    "path": "integration_tests/macros/text_type_value.sql",
    "content": "{%- macro text_type_value() -%}\n{%- if target.type == \"redshift\"-%}\ncharacter varying\n{%- elif target.type == \"snowflake\" -%}\nvarchar\n{%- elif target.type == \"bigquery\" -%}\nstring\n{%- else -%}\ntext\n{%- endif -%}\n{%- endmacro -%}\n"
  },
  {
    "path": "integration_tests/models/child_model.sql",
    "content": "select \n    * \nfrom {{ ref('model_data_a') }}\n"
  },
  {
    "path": "integration_tests/models/model_data_a.sql",
    "content": "select \n    * \nfrom {{ ref('data__a_relation') }}\n"
  },
  {
    "path": "integration_tests/models/model_from_source.sql",
    "content": "select\n    *\nfrom {{ source('codegen_integration_tests__data_source_schema', 'codegen_integration_tests__data_source_table') }}\n"
  },
  {
    "path": "integration_tests/models/model_incremental.sql",
    "content": "{{ config(\n    materialized='incremental'\n) }}\n\nselect 1 as id\n"
  },
  {
    "path": "integration_tests/models/model_repeated.sql",
    "content": "{% if target.type == \"bigquery\" %}\n\n    {#--- This exists to test the BigQuery-specific behavior requested in #190 -#}\nselect\n  [1, 2] AS repeated_int,\n  [\n    STRUCT(1 as nested_int_field, [STRUCT(\"a\" as string_field)] as nested_repeated_struct),\n    STRUCT(2 AS nested_int_field, [STRUCT(\"a\" as string_field)] as nested_repeated_struct)\n  ] as repeated_struct\n\n{% else %}\n    select 1 as int_field\n{% endif %}\n"
  },
  {
    "path": "integration_tests/models/model_struct.sql",
    "content": "{% if target.type == \"bigquery\" %}\n\n    {#--- This exists to test the BigQuery-specific behavior requested in #27 -#}\n    select\n        STRUCT(\n            source,\n            medium,\n            source_medium\n        ) as analytics,\n        col_x\n    from {{ ref('data__campaign_analytics') }}\n\n{% else %}\n\n    {#--- This enables mimicking the BigQuery behavior for other adapters -#}\n    select\n        analytics,\n        source,\n        medium,\n        source_medium,\n        col_x\n    from {{ ref('data__campaign_analytics') }}\n\n{% endif %}\n"
  },
  {
    "path": "integration_tests/models/model_without_any_ctes.sql",
    "content": "select *, 2 as col2\nfrom {{ ref('model_without_import_ctes') }} as m\nleft join (select 2 as col_a from {{ ref('data__a_relation') }}) as a on a.col_a = m.id\nwhere id = 1"
  },
  {
    "path": "integration_tests/models/model_without_import_ctes.sql",
    "content": "/*\n    This is my model!\n*/\n\n{{ config(\n    materialized='table',\n) }}\n\n-- I love this cte\nwith my_first_cte as (\n    select\n        a.col_a,\n        b.col_b\n    from {{ ref('data__a_relation') }} as a\n    left join      {{ ref(\"data__b_relation\") }} as b\n    on a.col_a = b.col_a\n    left join {{ ref('data__a_relation') }} as aa\n    on a.col_a = aa.col_a\n),\nmy_second_cte as (\n    select\n        1 as id\n    from {{ target.schema }}__data_source_schema.codegen_integration_tests__data_source_table\n    union all\n    select\n        2 as id\n    from {{ source('codegen_integration_tests__data_source_schema', 'codegen_integration_tests__data_source_table') }}\n    -- union all \n    -- select\n    --     3 as id\n    -- from development.codegen_integration_tests__data_source_schema.codegen_integration_tests__data_source_table\n    -- union all\n    -- select\n    --     4 as id\n    -- from {{ var(\"my_table_reference\") }}\n    -- union all\n    -- select\n    --     5 as id\n    -- from {{ var(\"my_other_table_reference\", \"table_d\") }}\n)\n-- my_third_cte as (\n--     select\n--         a.col_a,\n--         b.col_b\n--     from `raw_relation_1` as a\n--     left join \"raw_relation_2\" as b\n--     on a.col_a = b.col_b\n--     left join [raw_relation_3] as aa\n--     on a.col_a = aa.col_b\n--     left join 'raw_relation_4' as ab\n--     on a.col_a = ab.col_b\n--     left join 'my_schema'.'raw_relation_5' as ac\n--     on a.col_a = ac.col_b\n-- )\nselect * from my_second_cte"
  },
  {
    "path": "integration_tests/models/schema.yml",
    "content": "version: 2\n\nmodels:\n  - name: model_data_a\n    columns:\n      - name: col_a\n        description: 'description column \"a\"'"
  },
  {
    "path": "integration_tests/models/source.yml",
    "content": "version: 2\n\nsources:\n  - name: codegen_integration_tests__data_source_schema\n    schema: \"{{ target.schema ~ '__data_source_schema' }}\"\n    tables:\n      - name: codegen_integration_tests__data_source_table\n        columns:\n          - name: my_integer_col\n            description: My Integer Column\n          - name: my_bool_col\n            description: My Boolean Column\n      - name: codegen_integration_tests__data_source_table_case_sensitive\n"
  },
  {
    "path": "integration_tests/package-lock.yml",
    "content": "packages:\n- local: ../\n- package: dbt-labs/dbt_utils\n  version: 1.1.1\nsha1_hash: de2deba3d66ce03d8c02949013650cc9b94f6030\n"
  },
  {
    "path": "integration_tests/packages.yml",
    "content": "\npackages:\n    - local: ../\n"
  },
  {
    "path": "integration_tests/profiles.yml",
    "content": "\n# HEY! This file is used in the dbt-codegen integrations tests with GitHub CI.\n# You should __NEVER__ check credentials into version control. That's why we use environment variables everywhere.\n# Thanks for reading :)\n\nintegration_tests:\n  target: postgres\n  outputs:\n    postgres:\n      type: \"postgres\"\n      host: \"{{ env_var('POSTGRES_HOST') }}\"\n      user: \"{{ env_var('POSTGRES_USER') }}\"\n      pass: \"{{ env_var('DBT_ENV_SECRET_POSTGRES_PASS') }}\"\n      port: \"{{ env_var('POSTGRES_PORT') | as_number }}\"\n      dbname: \"{{ env_var('POSTGRES_DATABASE') }}\"\n      schema: \"{{ env_var('POSTGRES_SCHEMA') }}\"\n      threads: 5\n\n    redshift:\n      type: \"redshift\"\n      host: \"{{ env_var('REDSHIFT_HOST') }}\"\n      user: \"{{ env_var('REDSHIFT_USER') }}\"\n      pass: \"{{ env_var('DBT_ENV_SECRET_REDSHIFT_PASS') }}\"\n      dbname: \"{{ env_var('REDSHIFT_DATABASE') }}\"\n      port: \"{{ env_var('REDSHIFT_PORT') | as_number }}\"\n      schema: \"{{ env_var('REDSHIFT_SCHEMA') }}\"\n      threads: 1\n\n    bigquery:\n      type: bigquery\n      method: service-account-json\n      keyfile_json:\n        \"{{ env_var('BIGQUERY_KEYFILE_JSON') | as_native}}\"\n      project: \"{{ env_var('BIGQUERY_PROJECT') }}\"\n      dataset: \"{{ env_var('BIGQUERY_SCHEMA') }}\"\n      threads: 1\n\n    snowflake:\n      type: \"snowflake\"\n      account: \"{{ env_var('SNOWFLAKE_ACCOUNT') }}\"\n      user: \"{{ env_var('SNOWFLAKE_USER') }}\"\n      password: \"{{ env_var('DBT_ENV_SECRET_SNOWFLAKE_PASS') }}\"\n      role: \"{{ env_var('SNOWFLAKE_ROLE') }}\"\n      database: \"{{ env_var('SNOWFLAKE_DATABASE') }}\"\n      warehouse: \"{{ env_var('SNOWFLAKE_WAREHOUSE') }}\"\n      schema: \"{{ env_var('SNOWFLAKE_SCHEMA') }}\"\n      threads: 1\n"
  },
  {
    "path": "integration_tests/seeds/data__a_relation.csv",
    "content": "col_a,col_b\n1,a\n2,b\n"
  },
  {
    "path": "integration_tests/seeds/data__b_relation.csv",
    "content": "col_a,col_b\n3,c\n4,d"
  },
  {
    "path": "integration_tests/seeds/data__campaign_analytics.csv",
    "content": "source,medium,source_medium,analytics,col_x\nsource_1,medium_a,1a,,x\nsource_2,medium_b,2b,,x\nsource_3,medium_c,3c,,x\n"
  },
  {
    "path": "integration_tests/tests/test_generate_base_models.sql",
    "content": "\n{% set actual_base_model = codegen.generate_base_model(\n    source_name='codegen_integration_tests__data_source_schema',\n    table_name='codegen_integration_tests__data_source_table'\n  )\n%}\n\n{% set expected_base_model %}\n\nwith source as (\n\n    select * from {%raw%}{{ source('codegen_integration_tests__data_source_schema', 'codegen_integration_tests__data_source_table') }}{%endraw%}\n\n),\n\nrenamed as (\n\n    select\n        my_integer_col,\n        my_bool_col\n\n    from source\n\n)\n\nselect * from renamed\n{% endset %}\n\n{{ assert_equal (actual_base_model | trim, expected_base_model | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_base_models_all_args.sql",
    "content": "\n{% set actual_base_model = codegen.generate_base_model(\n    source_name='codegen_integration_tests__data_source_schema',\n    table_name='codegen_integration_tests__data_source_table_case_sensitive',\n    leading_commas=True,\n    case_sensitive_cols=True,\n    materialized='table'\n  )\n%}\n\n{% set expected_base_model %}\n{{ \"{{ config(materialized='table') }}\" }}\n\nwith source as (\n\n    select * from {%raw%}{{ source('codegen_integration_tests__data_source_schema', 'codegen_integration_tests__data_source_table_case_sensitive') }}{%endraw%}\n\n),\n\nrenamed as (\n\n    select\n        {{ adapter.quote(\"My_Integer_Col\") }}\n        , {{ adapter.quote(\"My_Bool_Col\") }}\n\n    from source\n\n)\n\nselect * from renamed\n{% endset %}\n\n{{ assert_equal (actual_base_model | trim, expected_base_model | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_base_models_case_sensitive.sql",
    "content": "{% set actual_base_model = codegen.generate_base_model(\n    source_name='codegen_integration_tests__data_source_schema',\n    table_name='codegen_integration_tests__data_source_table_case_sensitive',\n    case_sensitive_cols=True\n  )\n%}\n\n{% set expected_base_model %}\n\nwith source as (\n\n    select * from {%raw%}{{ source('codegen_integration_tests__data_source_schema', 'codegen_integration_tests__data_source_table_case_sensitive') }}{%endraw%}\n\n),\n\nrenamed as (\n\n    select\n        {{ adapter.quote(\"My_Integer_Col\") }},\n        {{ adapter.quote(\"My_Bool_Col\") }}\n\n    from source\n\n)\n\nselect * from renamed\n{% endset %}\n\n{{ assert_equal (actual_base_model | trim, expected_base_model | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_base_models_leading.sql",
    "content": "\n{% set actual_base_model = codegen.generate_base_model(\n    source_name='codegen_integration_tests__data_source_schema',\n    table_name='codegen_integration_tests__data_source_table',\n    leading_commas=True\n  )\n%}\n\n{% set expected_base_model %}\n\nwith source as (\n\n    select * from {%raw%}{{ source('codegen_integration_tests__data_source_schema', 'codegen_integration_tests__data_source_table') }}{%endraw%}\n\n),\n\nrenamed as (\n\n    select\n        my_integer_col\n        , my_bool_col\n\n    from source\n\n)\n\nselect * from renamed\n{% endset %}\n\n{{ assert_equal (actual_base_model | trim, expected_base_model | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_model_import_ctes.sql",
    "content": "{% set actual_model_with_import_ctes = codegen.generate_model_import_ctes(\n    model_name = 'model_without_import_ctes'\n  )\n%}\n\n{% set expected_model_with_import_ctes %}\n/*\n    This is my model!\n*/\n\n{% raw %}{{ config(\n    materialized='table',\n) }}{% endraw %}\n\nwith data__a_relation as (\n\n    select * from {% raw %}{{ ref('data__a_relation') }}{% endraw %}\n  \n),\n\ndata__b_relation as (\n\n    select * from {% raw %}{{ ref(\"data__b_relation\") }}{% endraw %}\n  \n),\n\ndevelopment_codegen_integration_tests__data_source_schema_codegen_integration_tests__data_source_table as (\n\n    select * from development.codegen_integration_tests__data_source_schema.codegen_integration_tests__data_source_table\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n),\n\nmy_other_table_reference as (\n\n    select * from {% raw %}{{ var(\"my_other_table_reference\", \"table_d\") }}{% endraw %}\n    -- CAUTION: It's best practice to use the ref or source function instead of a var\n  \n),\n\nmy_schema_raw_relation_5 as (\n\n    select * from 'my_schema'.'raw_relation_5'\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n),\n\nmy_table_reference as (\n\n    select * from {% raw %}{{ var(\"my_table_reference\") }}{% endraw %}\n    -- CAUTION: It's best practice to use the ref or source function instead of a var\n  \n),\n\nraw_relation_1 as (\n\n    select * from `raw_relation_1`\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n),\n\nraw_relation_2 as (\n\n    select * from \"raw_relation_2\"\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n),\n\nraw_relation_3 as (\n\n    select * from [raw_relation_3]\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n),\n\nraw_relation_4 as (\n\n    select * from 'raw_relation_4'\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n),\n\nsource_codegen_integration_tests__data_source_table as (\n\n    select * from {% raw %}{{ source('codegen_integration_tests__data_source_schema', 'codegen_integration_tests__data_source_table') }}{% endraw %} \n    -- CAUTION: It's best practice to create staging layer for raw sources\n  \n),\n\n-- I love this cte\nmy_first_cte as (\n    select\n        a.col_a,\n        b.col_b\n    from data__a_relation as a\n    left join data__b_relation as b\n    on a.col_a = b.col_a\n    left join data__a_relation as aa\n    on a.col_a = aa.col_a\n),\nmy_second_cte as (\n    select\n        1 as id\n    from {% raw %}{{ target.schema }}{% endraw %}__data_source_schema.codegen_integration_tests__data_source_table\n    union all\n    select\n        2 as id\n    from source_codegen_integration_tests__data_source_table\n    -- union all \n    -- select\n    --     3 as id\n    -- from development_codegen_integration_tests__data_source_schema_codegen_integration_tests__data_source_table\n    -- union all\n    -- select\n    --     4 as id\n    -- from my_table_reference\n    -- union all\n    -- select\n    --     5 as id\n    -- from my_other_table_reference\n)\n-- my_third_cte as (\n--     select\n--         a.col_a,\n--         b.col_b\n--     from raw_relation_1 as a\n--     left join raw_relation_2 as b\n--     on a.col_a = b.col_b\n--     left join raw_relation_3 as aa\n--     on a.col_a = 
aa.col_b\n--     left join raw_relation_4 as ab\n--     on a.col_a = ab.col_b\n--     left join my_schema_raw_relation_5 as ac\n--     on a.col_a = ac.col_b\n-- )\nselect * from my_second_cte\n{% endset %}\n\n{{ assert_equal (actual_model_with_import_ctes | trim, expected_model_with_import_ctes | trim) }}"
  },
  {
    "path": "integration_tests/tests/test_generate_model_import_ctes_leading.sql",
    "content": "{% set actual_model_with_import_ctes = codegen.generate_model_import_ctes(\n    model_name = 'model_without_import_ctes',\n    leading_commas = true\n  )\n%}\n\n{% set expected_model_with_import_ctes %}\n/*\n    This is my model!\n*/\n\n{% raw %}{{ config(\n    materialized='table',\n) }}{% endraw %}\n\nwith data__a_relation as (\n\n    select * from {% raw %}{{ ref('data__a_relation') }}{% endraw %}\n  \n)\n\n,data__b_relation as (\n\n    select * from {% raw %}{{ ref(\"data__b_relation\") }}{% endraw %}\n  \n)\n\n,development_codegen_integration_tests__data_source_schema_codegen_integration_tests__data_source_table as (\n\n    select * from development.codegen_integration_tests__data_source_schema.codegen_integration_tests__data_source_table\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n)\n\n,my_other_table_reference as (\n\n    select * from {% raw %}{{ var(\"my_other_table_reference\", \"table_d\") }}{% endraw %}\n    -- CAUTION: It's best practice to use the ref or source function instead of a var\n  \n)\n\n,my_schema_raw_relation_5 as (\n\n    select * from 'my_schema'.'raw_relation_5'\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n)\n\n,my_table_reference as (\n\n    select * from {% raw %}{{ var(\"my_table_reference\") }}{% endraw %}\n    -- CAUTION: It's best practice to use the ref or source function instead of a var\n  \n)\n\n,raw_relation_1 as (\n\n    select * from `raw_relation_1`\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n)\n\n,raw_relation_2 as (\n\n    select * from \"raw_relation_2\"\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n)\n\n,raw_relation_3 as (\n\n    select * from [raw_relation_3]\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n)\n\n,raw_relation_4 as (\n\n    select * from 'raw_relation_4'\n    -- CAUTION: It's best practice to use the ref or source function instead of a direct reference\n  \n)\n\n,source_codegen_integration_tests__data_source_table as (\n\n    select * from {% raw %}{{ source('codegen_integration_tests__data_source_schema', 'codegen_integration_tests__data_source_table') }}{% endraw %} \n    -- CAUTION: It's best practice to create staging layer for raw sources\n  \n)\n\n-- I love this cte\n,my_first_cte as (\n    select\n        a.col_a,\n        b.col_b\n    from data__a_relation as a\n    left join data__b_relation as b\n    on a.col_a = b.col_a\n    left join data__a_relation as aa\n    on a.col_a = aa.col_a\n),\nmy_second_cte as (\n    select\n        1 as id\n    from {% raw %}{{ target.schema }}{% endraw %}__data_source_schema.codegen_integration_tests__data_source_table\n    union all\n    select\n        2 as id\n    from source_codegen_integration_tests__data_source_table\n    -- union all \n    -- select\n    --     3 as id\n    -- from development_codegen_integration_tests__data_source_schema_codegen_integration_tests__data_source_table\n    -- union all\n    -- select\n    --     4 as id\n    -- from my_table_reference\n    -- union all\n    -- select\n    --     5 as id\n    -- from my_other_table_reference\n)\n-- my_third_cte as (\n--     select\n--         a.col_a,\n--         b.col_b\n--     from raw_relation_1 as a\n--     left join raw_relation_2 as b\n--     on a.col_a = b.col_b\n--     left join raw_relation_3 as 
aa\n--     on a.col_a = aa.col_b\n--     left join raw_relation_4 as ab\n--     on a.col_a = ab.col_b\n--     left join my_schema_raw_relation_5 as ac\n--     on a.col_a = ac.col_b\n-- )\nselect * from my_second_cte\n{% endset %}\n\n{{ assert_equal (actual_model_with_import_ctes | trim, expected_model_with_import_ctes | trim) }}"
  },
  {
    "path": "integration_tests/tests/test_generate_model_import_ctes_no_ctes.sql",
    "content": "{% set actual_model_with_import_ctes = codegen.generate_model_import_ctes(\n    model_name = 'model_without_any_ctes'\n  )\n%}\n\n{% set expected_model_with_import_ctes %}\nwith data__a_relation as (\n\n    select * from {% raw %}{{ ref('data__a_relation') }}{% endraw %}\n  \n),\n\nmodel_without_import_ctes as (\n\n    select * from {% raw %}{{ ref('model_without_import_ctes') }}{% endraw %}\n  \n)\n\nselect *, 2 as col2\nfrom model_without_import_ctes as m\nleft join (select 2 as col_a from data__a_relation) as a on a.col_a = m.id\nwhere id = 1\n{% endset %}\n\n{{ assert_equal (actual_model_with_import_ctes | trim, expected_model_with_import_ctes | trim) }}"
  },
  {
    "path": "integration_tests/tests/test_generate_model_repeated_yaml.sql",
    "content": "{% set raw_schema = generate_schema_name('raw_data') %}\n\n{% set actual_source_yaml = codegen.generate_model_yaml(\n    model_names=['model_repeated']\n  )\n%}\n\n{% if target.type == \"bigquery\" %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nmodels:\n  - name: model_repeated\n    description: \"\"\n    columns:\n      - name: repeated_int\n        data_type: array<int64>\n        description: \"\"\n\n      - name: repeated_struct\n        data_type: array\n        description: \"\"\n\n      - name: repeated_struct.nested_int_field\n        data_type: int64\n        description: \"\"\n\n      - name: repeated_struct.nested_repeated_struct\n        data_type: array\n        description: \"\"\n\n      - name: repeated_struct.nested_repeated_struct.string_field\n        data_type: string\n        description: \"\"\n\n{% endset %}\n\n{% else %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nmodels:\n  - name: model_repeated\n    description: \"\"\n    columns:\n      - name: int_field\n        data_type: {{ integer_type_value() }}\n        description: \"\"\n\n{% endset %}\n\n{% endif %}\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_model_struct_yaml.sql",
    "content": "{% set raw_schema = generate_schema_name('raw_data') %}\n\n-- test all args\n{% set actual_source_yaml = codegen.generate_source(\n    database_name=target.database,\n    schema_name='codegen_integration_tests__data_source_schema',\n    table_names=['codegen_integration_tests__data_source_table_nested_array'],\n    generate_columns=True,\n    include_descriptions=True\n) %}\n\n{% set actual_source_yaml = codegen.generate_model_yaml(\n    model_names=['model_struct'],\n    include_data_types=False\n  )\n%}\n\n{% if target.type == \"bigquery\" %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nmodels:\n  - name: model_struct\n    description: \"\"\n    columns:\n      - name: analytics\n        description: \"\"\n\n      - name: analytics.source\n        description: \"\"\n\n      - name: analytics.medium\n        description: \"\"\n\n      - name: analytics.source_medium\n        description: \"\"\n\n      - name: col_x\n        description: \"\"\n\n{% endset %}\n\n{% else %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nmodels:\n  - name: model_struct\n    description: \"\"\n    columns:\n      - name: analytics\n        description: \"\"\n\n      - name: source\n        description: \"\"\n\n      - name: medium\n        description: \"\"\n\n      - name: source_medium\n        description: \"\"\n\n      - name: col_x\n        description: \"\"\n\n{% endset %}\n\n{% endif %}\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_model_yaml.sql",
    "content": "{% set actual_model_yaml = codegen.generate_model_yaml(\n    model_names=['data__a_relation']\n  )\n%}\n\n{% set expected_model_yaml %}\nversion: 2\n\nmodels:\n  - name: data__a_relation\n    description: \"\"\n    columns:\n      - name: col_a\n        data_type: {{ integer_type_value() }}\n        description: \"\"\n\n      - name: col_b\n        data_type: {{ text_type_value() }}\n        description: \"\"\n\n{% endset %}\n\n{{ assert_equal (actual_model_yaml | trim, expected_model_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_model_yaml_multiple_models.sql",
    "content": "{% set actual_model_yaml = codegen.generate_model_yaml(\n    model_names=['data__a_relation','data__b_relation'],\n    include_data_types=False\n  )\n%}\n\n{% set expected_model_yaml %}\nversion: 2\n\nmodels:\n  - name: data__a_relation\n    description: \"\"\n    columns:\n      - name: col_a\n        description: \"\"\n\n      - name: col_b\n        description: \"\"\n\n  - name: data__b_relation\n    description: \"\"\n    columns:\n      - name: col_a\n        description: \"\"\n\n      - name: col_b\n        description: \"\"\n\n{% endset %}\n\n{{ assert_equal (actual_model_yaml | trim, expected_model_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_model_yaml_upstream_descriptions.sql",
    "content": "{% set actual_model_yaml = codegen.generate_model_yaml(\n    model_names=['child_model'],\n    upstream_descriptions=True,\n    include_data_types=False\n  )\n%}\n\n{% set expected_model_yaml %}\nversion: 2\n\nmodels:\n  - name: child_model\n    description: \"\"\n    columns:\n      - name: col_a\n        description: \"description column \\\"a\\\"\"\n\n      - name: col_b\n        description: \"\"\n\n{% endset %}\n\n{{ assert_equal (actual_model_yaml | trim, expected_model_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_model_yaml_upstream_source_descriptions.sql",
    "content": "{% set actual_model_yaml = codegen.generate_model_yaml(\n    model_names=['model_from_source'],\n    upstream_descriptions=True,\n    include_data_types=False\n  )\n%}\n\n{% set expected_model_yaml %}\nversion: 2\n\nmodels:\n  - name: model_from_source\n    description: \"\"\n    columns:\n      - name: my_integer_col\n        description: \"My Integer Column\"\n\n      - name: my_bool_col\n        description: \"My Boolean Column\"\n\n{% endset %}\n\n{{ assert_equal (actual_model_yaml | trim, expected_model_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_source.sql",
    "content": "\n{% set raw_schema = generate_schema_name('raw_data') %}\n\n-- test default args\n{% set actual_source_yaml = codegen.generate_source(raw_schema) %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nsources:\n  - name: {{ raw_schema | trim | lower }}\n    tables:\n      - name: data__a_relation\n      - name: data__b_relation\n      - name: data__campaign_analytics\n{% endset %}\n\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_source_all_args.sql",
    "content": "{% set raw_schema = generate_schema_name('raw_data') %}\n\n-- test all args\n{% set actual_source_yaml = codegen.generate_source(\n    schema_name=raw_schema,\n    table_pattern='%',\n    exclude='',\n    database_name=target.database,\n    generate_columns=True,\n    include_descriptions=True,\n    include_data_types=True,\n    name=raw_schema,\n    table_names=None,\n    include_database=True,\n    include_schema=True,\n    case_sensitive_databases=False,\n    case_sensitive_schemas=False,\n    case_sensitive_tables=False,\n    case_sensitive_cols=False\n) %}\n\n\n{% set expected_source_yaml %}\n\nversion: 2\n\nsources:\n  - name: {{ raw_schema | trim | lower }}\n    description: \"\"\n    database: {{ target.database | trim | lower }}\n    schema: {{ raw_schema | trim | lower }}\n    tables:\n      - name: data__a_relation\n        description: \"\"\n        columns:\n          - name: col_a\n            data_type: {{ integer_type_value() }}\n            description: \"\"\n          - name: col_b\n            data_type: {{ text_type_value() }}\n            description: \"\"\n\n      - name: data__b_relation\n        description: \"\"\n        columns:\n          - name: col_a\n            data_type: {{ integer_type_value() }}\n            description: \"\"\n          - name: col_b\n            data_type: {{ text_type_value() }}\n            description: \"\"\n\n      - name: data__campaign_analytics\n        description: \"\"\n        columns:\n          - name: source\n            data_type: {{ text_type_value() }}\n            description: \"\"\n          - name: medium\n            data_type: {{ text_type_value() }}\n            description: \"\"\n          - name: source_medium\n            data_type: {{ text_type_value() }}\n            description: \"\"\n          - name: analytics\n            data_type: {{ integer_type_value() }}\n            description: \"\"\n          - name: col_x\n            data_type: {{ text_type_value() }}\n            description: \"\"\n\n{% endset %}\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_source_exclude.sql",
    "content": "\n{% set raw_schema = generate_schema_name('raw_data') %}\n\n-- test default args\n{% set actual_source_yaml = codegen.generate_source(raw_schema, table_pattern='data__%', exclude='data__a_%') %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nsources:\n  - name: {{ raw_schema | trim | lower}}\n    tables:\n      - name: data__b_relation\n      - name: data__campaign_analytics\n{% endset %}\n\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_source_include_database_property.sql",
    "content": "\n{% set raw_schema = generate_schema_name('raw_data') %}\n\n{% set actual_source_yaml = codegen.generate_source(raw_schema, include_database=True) %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nsources:\n  - name: {{ raw_schema | trim | lower }}\n    database: {{ target.database | trim | lower }}\n    tables:\n      - name: data__a_relation\n      - name: data__b_relation\n      - name: data__campaign_analytics\n{% endset %}\n\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_source_include_schema_property.sql",
    "content": "\n{% set raw_schema = generate_schema_name('raw_data') %}\n\n{% set actual_source_yaml = codegen.generate_source(raw_schema, include_schema=True) %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nsources:\n  - name: {{ raw_schema | trim | lower }}\n    schema: {{ raw_schema | trim | lower }}\n    tables:\n      - name: data__a_relation\n      - name: data__b_relation\n      - name: data__campaign_analytics\n{% endset %}\n\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_source_some_tables.sql",
    "content": "{% set raw_schema = generate_schema_name('raw_data') %}\n\n-- test all args\n{% set actual_source_yaml = codegen.generate_source(\n    schema_name=raw_schema,\n    database_name=target.database,\n    table_names=['data__a_relation'],\n    generate_columns=True,\n    include_descriptions=True,\n    include_data_types=False\n) %}\n\n\n{% set expected_source_yaml %}\nversion: 2\n\nsources:\n  - name: {{ raw_schema | trim | lower }}\n    description: \"\"\n    tables:\n      - name: data__a_relation\n        description: \"\"\n        columns:\n          - name: col_a\n            description: \"\"\n          - name: col_b\n            description: \"\"\n\n{% endset %}\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_source_table_descriptions.sql",
    "content": "\n{% set raw_schema = generate_schema_name('raw_data') %}\n\n-- test default args\n{% set actual_source_yaml = codegen.generate_source(raw_schema, include_descriptions=True) %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nsources:\n  - name: {{ raw_schema | trim | lower }}\n    description: \"\"\n    tables:\n      - name: data__a_relation\n        description: \"\"\n      - name: data__b_relation\n        description: \"\"\n      - name: data__campaign_analytics\n        description: \"\"\n{% endset %}\n\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_source_table_name.sql",
    "content": "\n{% set raw_schema = generate_schema_name('raw_data') %}\n\n-- test default args\n{% set actual_source_yaml = codegen.generate_source(raw_schema, name='raw') %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nsources:\n  - name: raw\n    schema: {{ raw_schema | trim | lower }}\n    tables:\n      - name: data__a_relation\n      - name: data__b_relation\n      - name: data__campaign_analytics\n\n{% endset %}\n\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_source_table_pattern.sql",
    "content": "\n{% set raw_schema = generate_schema_name('raw_data') %}\n\n-- test default args\n{% set actual_source_yaml = codegen.generate_source(raw_schema, table_pattern='data__b_%') %}\n\n{% set expected_source_yaml %}\nversion: 2\n\nsources:\n  - name: {{ raw_schema | trim | lower }}\n    tables:\n      - name: data__b_relation\n{% endset %}\n\n\n{{ assert_equal (actual_source_yaml | trim, expected_source_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_unit_test_template.sql",
    "content": "{% set actual_model_yaml = codegen.generate_unit_test_template(\n    model_name='child_model',\n    inline_columns=False\n  )\n%}\n\n-- depends_on: {{ ref('model_data_a') }}\n-- depends_on: {{ ref('child_model') }}\n\n{% set expected_model_yaml %}\nunit_tests:\n  - name: unit_test_child_model\n    model: child_model\n\n    given:\n      - input: ref(\"model_data_a\")\n        rows:\n          - col_a: \n            col_b: \n\n    expect:\n      rows:\n        - col_a: \n          col_b: \n\n{% endset %}\n\n{{ assert_equal (actual_model_yaml | trim, expected_model_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_unit_test_template_incremental.sql",
    "content": "{% set actual_model_yaml = codegen.generate_unit_test_template(\n    model_name='model_incremental',\n  )\n%}\n\n-- depends_on: {{ ref('model_incremental') }}\n\n{% set expected_model_yaml %}\nunit_tests:\n  - name: unit_test_model_incremental\n    model: model_incremental\n\n    overrides:\n      macros:\n        is_incremental: true\n\n    given:\n      - input: this\n        rows:\n          - id: \n\n    expect:\n      rows:\n        - id:\n\n{% endset %}\n\n{{ assert_equal (actual_model_yaml | trim, expected_model_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_unit_test_template_inline_columns.sql",
    "content": "{% set actual_model_yaml = codegen.generate_unit_test_template(\n    model_name='child_model',\n    inline_columns=True\n  )\n%}\n\n-- depends_on: {{ ref('model_data_a') }}\n-- depends_on: {{ ref('child_model') }}\n\n{% set expected_model_yaml %}\nunit_tests:\n  - name: unit_test_child_model\n    model: child_model\n\n    given:\n      - input: ref(\"model_data_a\")\n        rows:\n          - {col_a: , col_b: }\n\n    expect:\n      rows:\n        - {col_a: , col_b: }\n\n{% endset %}\n\n{{ assert_equal (actual_model_yaml | trim, expected_model_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_unit_test_template_model_from_source.sql",
    "content": "{% set actual_model_yaml = codegen.generate_unit_test_template(\n    model_name='model_from_source',\n  )\n%}\n\n-- depends_on: {{ ref('model_from_source') }}\n\n{% set expected_model_yaml %}\nunit_tests:\n  - name: unit_test_model_from_source\n    model: model_from_source\n\n    given:\n      - input: source(\"codegen_integration_tests__data_source_schema\", \"codegen_integration_tests__data_source_table\")\n        rows:\n          - my_integer_col: \n            my_bool_col: \n\n    expect:\n      rows:\n        - my_integer_col: \n          my_bool_col:\n\n{% endset %}\n\n{{ assert_equal (actual_model_yaml | trim, expected_model_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_generate_unit_test_template_no_inputs.sql",
    "content": "{% set actual_model_yaml = codegen.generate_unit_test_template(\n    model_name='data__a_relation',\n    inline_columns=False\n  )\n%}\n\n-- depends_on: {{ ref('data__a_relation') }}\n\n{% set expected_model_yaml %}\nunit_tests:\n  - name: unit_test_data__a_relation\n    model: data__a_relation\n\n    given: []\n\n    expect:\n      rows:\n        - col_a: \n          col_b:\n\n{% endset %}\n\n{{ assert_equal (actual_model_yaml | trim, expected_model_yaml | trim) }}\n"
  },
  {
    "path": "integration_tests/tests/test_helper_get_models.sql",
    "content": "-- depends_on: {{ ref('model_data_a') }}\n-- depends_on: {{ ref('model_incremental') }}\n-- depends_on: {{ ref('model_struct') }}\n-- depends_on: {{ ref('model_without_import_ctes') }}\n-- depends_on: {{ ref('model_without_any_ctes') }}\n\n{% if execute %}\n{% set actual_list = codegen.get_models(prefix='model_')|sort %}\n{% endif %}\n\n{% set expected_list = ['model_data_a', 'model_from_source', 'model_incremental', 'model_repeated', 'model_struct', 'model_without_any_ctes', 'model_without_import_ctes'] %}\n\n{{ assert_equal (actual_list, expected_list) }}\n"
  },
  {
    "path": "macros/create_base_models.sql",
    "content": "{% macro create_base_models(source_name, tables) %}\n    {{ return(adapter.dispatch('create_base_models', 'codegen')(source_name, tables)) }}\n{% endmacro %}\n\n{% macro default__create_base_models(source_name, tables) %}\n\n{% set source_name = \"\"~ source_name ~\"\" %}\n\n{% set zsh_command_models = \"source dbt_packages/codegen/bash_scripts/base_model_creation.sh \"\"\"~ source_name ~\"\"\" \" %}\n\n{%- set models_array = [] -%}\n\n{% for t in tables %}\n    {% set help_command = zsh_command_models + t %}\n    {{ models_array.append(help_command) }}\n{% endfor %}\n\n{{ log(\"Run these commands in your shell to generate the models:\\n\" ~ models_array|join(' && \\n'), info=True) }}\n\n{% endmacro %}\n"
  },
  {
    "path": "macros/generate_base_model.sql",
    "content": "{% macro generate_base_model(source_name, table_name, leading_commas=False, case_sensitive_cols=False, materialized=None) %}\n  {{ return(adapter.dispatch('generate_base_model', 'codegen')(source_name, table_name, leading_commas, case_sensitive_cols, materialized)) }}\n{% endmacro %}\n\n{% macro default__generate_base_model(source_name, table_name, leading_commas, case_sensitive_cols, materialized) %}\n\n{%- set source_relation = source(source_name, table_name) -%}\n\n{%- set columns = adapter.get_columns_in_relation(source_relation) -%}\n{% set column_names=columns | map(attribute='name') %}\n{% set base_model_sql %}\n\n{%- if materialized is not none -%}\n    {{ \"{{ config(materialized='\" ~ materialized ~ \"') }}\" }}\n{%- endif %}\n\nwith source as (\n\n    select * from {% raw %}{{ source({% endraw %}'{{ source_name }}', '{{ table_name }}'{% raw %}) }}{% endraw %}\n\n),\n\nrenamed as (\n\n    select\n        {%- if leading_commas -%}\n        {%- for column in column_names %}\n        {{\", \" if not loop.first}}{% if not case_sensitive_cols %}{{ column | lower }}{% else %}{{ adapter.quote(column) }}{% endif %}\n        {%- endfor %}\n        {%- else -%}\n        {%- for column in column_names %}\n        {% if not case_sensitive_cols %}{{ column | lower }}{% else %}{{ adapter.quote(column) }}{% endif %}{{\",\" if not loop.last}}\n        {%- endfor -%}\n        {%- endif %}\n\n    from source\n\n)\n\nselect * from renamed\n{% endset %}\n\n{% if execute %}\n\n{{ print(base_model_sql) }}\n{% do return(base_model_sql) %}\n\n{% endif %}\n{% endmacro %}\n"
  },
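  {
    "path": "examples/generate_base_model_usage.sh",
    "content": "#!/bin/bash\n# Hypothetical usage sketch (illustrative only; not a file shipped with the package).\n# generate_base_model prints the SQL for a base model that selects and renames every\n# column of one source table. The global --quiet flag suppresses dbt's log lines so\n# only the printed SQL reaches the file. \"jaffle_shop\" and \"customers\" are placeholders.\ndbt --quiet run-operation generate_base_model \\\n  --args '{\"source_name\": \"jaffle_shop\", \"table_name\": \"customers\", \"leading_commas\": false}' \\\n  > models/staging/stg_jaffle_shop__customers.sql\n"
  },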
  {
    "path": "macros/generate_model_import_ctes.sql",
    "content": "{% macro generate_model_import_ctes(model_name, leading_commas = False) %}\n    {{ return(adapter.dispatch('generate_model_import_ctes', 'codegen')(model_name, leading_commas)) }}\n{% endmacro %}\n\n{% macro default__generate_model_import_ctes(model_name, leading_commas) %}\n\n    {%- if execute -%}\n    {%- set nodes = graph.nodes.values() -%}\n\n    {%- set model = (nodes\n        | selectattr('name', 'equalto', model_name) \n        | selectattr('resource_type', 'equalto', 'model')\n        | list).pop() -%}\n\n    {%- set model_raw_sql = model.raw_sql or model.raw_code -%}\n    {%- else -%}\n    {%- set model_raw_sql = '' -%}\n    {%- endif -%}\n\n    {#-\n\n        REGEX Explanations\n\n        # with_regex\n        - matches (start of file followed by anything then whitespace\n        or whitespace\n        or a comma) followed by the word with then a space   \n\n        # from_ref \n        - matches (from or join) followed by some spaces and then {{ref(<something>)}}\n\n        # from_source \n        - matches (from or join) followed by some spaces and then {{source(<something>,<something_else>)}}\n\n        # from_var_1\n        - matches (from or join) followed by some spaces and then {{var(<something>)}}\n\n        # from_var_2\n        - matches (from or join) followed by some spaces and then {{var(<something>,<something_else>)}}\n\n        # from_table_1\n        - matches (from or join) followed by some spaces and then <something>.<something_else>\n          where each <something> is enclosed by (` or [ or \" or ' or nothing)\n\n        # from_table_2\n        - matches (from or join) followed by some spaces and then <something>.<something_else>.<something_different>\n          where each <something> is enclosed by (` or [ or \" or ' or nothing)\n\n        # from_table_3\n        - matches (from or join) followed by some spaces and then <something>\n          where <something> is enclosed by (` or [ or \" or ')\n\n        # config block\n        - matches the start of the file followed by anything and then {{config(<something>)}}\n\n    -#}\n\n    {%- set re = modules.re -%}\n\n    {%- set with_regex = '(?i)(?s)(^.*\\s*|\\s+|,)with\\s' -%}\n    {%- set does_raw_sql_contain_cte = re.search(with_regex, model_raw_sql) -%}\n\n    {%- set from_regexes = {\n        'from_ref':\n            '(?ix)\n\n            # first matching group\n            # from or join followed by at least 1 whitespace character\n            (from|join)\\s+\n\n            # second matching group\n            # opening {{, 0 or more whitespace character(s), ref, 0 or more whitespace character(s), an opening parenthesis, 0 or more whitespace character(s), 1 or 0 quotation mark\n            ({{\\s*ref\\s*\\(\\s*[\\'\\\"]?)\n            \n            # third matching group\n            # at least 1 of anything except a parenthesis or quotation mark\n            ([^)\\'\\\"]+)\n            \n            # fourth matching group\n            # 1 or 0 quotation mark, 0 or more whitespace character(s)\n            ([\\'\\\"]?\\s*)\n\n            # fifth matching group\n            # a closing parenthesis, 0 or more whitespace character(s), closing }}\n            (\\)\\s*}})\n        \n            ',\n        'from_source':\n            '(?ix)\n\n            # first matching group\n            # from or join followed by at least 1 whitespace character\n            (from|join)\\s+\n\n            # second matching group\n            # opening {{, 0 or more whitespace character(s), source, 0 or more 
whitespace character(s), an opening parenthesis, 0 or more whitespace character(s), 1 or 0 quotation mark\n            ({{\\s*source\\s*\\(\\s*[\\'\\\"]?)\n\n            # third matching group\n            # at least 1 of anything except a parenthesis or quotation mark\n            ([^)\\'\\\"]+)\n\n            # fourth matching group\n            # 1 or 0 quotation mark, 0 or more whitespace character(s)\n            ([\\'\\\"]?\\s*)\n\n            # fifth matching group\n            # a comma\n            (,)\n\n            # sixth matching group\n            # 0 or more whitespace character(s), 1 or 0 quotation mark\n            (\\s*[\\'\\\"]?)\n\n            # seventh matching group\n            # at least 1 of anything except a parenthesis or quotation mark\n            ([^)\\'\\\"]+)\n\n            # eighth matching group\n            # 1 or 0 quotation mark, 0 or more whitespace character(s)\n            ([\\'\\\"]?\\s*)\n\n            # ninth matching group\n            # a closing parenthesis, 0 or more whitespace character(s), closing }}\n            (\\)\\s*}})\n\n            ',\n        'from_var_1':\n            '(?ix)\n\n            # first matching group\n            # from or join followed by at least 1 whitespace character\n            (from|join)\\s+\n\n            # second matching group\n            # opening {{, 0 or more whitespace character(s), var, 0 or more whitespace character(s), an opening parenthesis, 0 or more whitespace character(s), 1 or 0 quotation mark\n            ({{\\s*var\\s*\\(\\s*[\\'\\\"]?)\n\n            # third matching group\n            # at least 1 of anything except a parenthesis or quotation mark\n            ([^)\\'\\\"]+)\n\n            # fourth matching group\n            # 1 or 0 quotation mark, 0 or more whitespace character(s)\n            ([\\'\\\"]?\\s*)\n\n            # fifth matching group\n            # a closing parenthesis, 0 or more whitespace character(s), closing }}\n            (\\)\\s*}})\n            \n            ',\n        'from_var_2':\n            '(?ix)\n\n            # first matching group\n            # from or join followed by at least 1 whitespace character\n            (from|join)\\s+\n            \n            # second matching group\n            # opening {{, 0 or more whitespace character(s), var, 0 or more whitespace character(s), an opening parenthesis, 0 or more whitespace character(s), 1 or 0 quotation mark\n            ({{\\s*var\\s*\\(\\s*[\\'\\\"]?)\n\n            # third matching group\n            # at least 1 of anything except a parenthesis or quotation mark            \n            ([^)\\'\\\"]+)\n            \n            # fourth matching group\n            # 1 or 0 quotation mark, 0 or more whitespace character(s)\n            ([\\'\\\"]?\\s*)\n\n            # fifth matching group\n            # a comma\n            (,)\n\n            # sixth matching group\n            # 0 or more whitespace character(s), 1 or 0 quotation mark            \n            (\\s*[\\'\\\"]?)\n\n            # seventh matching group\n            # at least 1 of anything except a parenthesis or quotation mark            \n            ([^)\\'\\\"]+)\n\n            # eighth matching group\n            # 1 or 0 quotation mark, 0 or more whitespace character(s)            \n            ([\\'\\\"]?\\s*)\n\n            # ninth matching group\n            # a closing parenthesis, 0 or more whitespace character(s), closing }}            \n            (\\)\\s*}})\n            \n            ',\n        'from_table_1':\n           
 '(?ix)\n            \n            # first matching group\n            # from or join followed by at least 1 whitespace character            \n            (from|join)\\s+\n            \n            # second matching group\n            # 1 or 0 of (opening bracket, backtick, or quotation mark)\n            ([\\[`\\\"\\']?)\n            \n            # third matching group\n            # at least 1 word character\n            (\\w+)\n            \n            # fouth matching group\n            # 1 or 0 of (closing bracket, backtick, or quotation mark)\n            ([\\]`\\\"\\']?)\n            \n            # fifth matching group\n            # a period\n            (\\.)\n            \n            # sixth matching group\n            # 1 or 0 of (opening bracket, backtick, or quotation mark)\n            ([\\[`\\\"\\']?)\n            \n            # seventh matching group\n            # at least 1 word character\n            (\\w+)\n            \n            # eighth matching group\n            # 1 or 0 of (closing bracket, backtick, or quotation mark) folowed by a whitespace character or end of string\n            ([\\]`\\\"\\']?)(?=\\s|$)\n            \n            ',\n        'from_table_2':\n            '(?ix)\n\n            # first matching group\n            # from or join followed by at least 1 whitespace character \n            (from|join)\\s+\n            \n            # second matching group\n            # 1 or 0 of (opening bracket, backtick, or quotation mark)            \n            ([\\[`\\\"\\']?)\n            \n            # third matching group\n            # at least 1 word character\n            (\\w+)\n\n            # fouth matching group\n            # 1 or 0 of (closing bracket, backtick, or quotation mark)            \n            ([\\]`\\\"\\']?)\n            \n            # fifth matching group\n            # a period            \n            (\\.)\n            \n            # sixth matching group\n            # 1 or 0 of (opening bracket, backtick, or quotation mark)\n            ([\\[`\\\"\\']?)\n\n            # seventh matching group\n            # at least 1 word character            \n            (\\w+)\n            \n            # eighth matching group\n            # 1 or 0 of (closing bracket, backtick, or quotation mark) \n            ([\\]`\\\"\\']?)\n            \n            # ninth matching group\n            # a period             \n            (\\.)\n            \n            # tenth matching group\n            # 1 or 0 of (closing bracket, backtick, or quotation mark)             \n            ([\\[`\\\"\\']?)\n            \n            # eleventh matching group\n            # at least 1 word character   \n            (\\w+)\n\n            # twelfth matching group\n            # 1 or 0 of (closing bracket, backtick, or quotation mark) folowed by a whitespace character or end of string\n            ([\\]`\\\"\\']?)(?=\\s|$)\n            \n            ',\n        'from_table_3':\n            '(?ix)\n\n            # first matching group\n            # from or join followed by at least 1 whitespace character             \n            (from|join)\\s+\n            \n            # second matching group\n            # 1 or 0 of (opening bracket, backtick, or quotation mark)            \n            ([\\[`\\\"\\'])\n            \n            # third matching group\n            # at least 1 word character or space \n            ([\\w ]+)\n\n            # fourth matching group\n            # 1 or 0 of (closing bracket, backtick, or quotation mark) folowed by a 
whitespace character or end of string\n            ([\\]`\\\"\\'])(?=\\s|$)\n            \n            ',\n        'config_block':'(?i)(?s)^.*{{\\s*config\\s*\\([^)]+\\)\\s*}}'\n    } -%}\n\n    {%- set from_list = [] -%}\n    {%- set config_list = [] -%}\n    {%- set ns = namespace(model_sql = model_raw_sql) -%}\n\n    {%- for regex_name, regex_pattern in from_regexes.items() -%}\n\n        {%- set all_regex_matches = re.findall(regex_pattern, model_raw_sql) -%}\n\n        {%- for match in all_regex_matches -%}\n\n            {%- if regex_name == 'config_block' -%}\n                {%- set match_tuple = (match|trim, regex_name) -%}\n                {%- do config_list.append(match_tuple) -%}\n            {%- elif regex_name == 'from_source' -%}    \n                {%- set full_from_clause = match[1:]|join|trim -%}\n                {%- set cte_name = 'source_' + match[6]|lower -%}\n                {%- set match_tuple = (cte_name, full_from_clause, regex_name) -%}\n                {%- do from_list.append(match_tuple) -%} \n            {%- elif regex_name == 'from_table_1' -%}\n                {%- set full_from_clause = match[1:]|join()|trim -%}\n                {%- set cte_name = match[2]|lower + '_' + match[6]|lower -%}\n                {%- set match_tuple = (cte_name, full_from_clause, regex_name) -%}\n                {%- do from_list.append(match_tuple) -%}   \n            {%- elif regex_name == 'from_table_2' -%}\n                {%- set full_from_clause = match[1:]|join()|trim -%}\n                {%- set cte_name = match[2]|lower + '_' + match[6]|lower + '_' + match[10]|lower -%}\n                {%- set match_tuple = (cte_name, full_from_clause, regex_name) -%}\n                {%- do from_list.append(match_tuple) -%}                     \n            {%- else -%}\n                {%- set full_from_clause = match[1:]|join|trim -%}\n                {%- set cte_name = match[2]|trim|lower -%}\n                {%- set match_tuple = (cte_name, full_from_clause, regex_name) -%}\n                {%- do from_list.append(match_tuple) -%}\n            {%- endif -%}\n\n        {%- endfor -%}\n\n        {%- if regex_name == 'config_block' -%}\n        {%- elif regex_name == 'from_source' -%}\n            {%- set ns.model_sql = re.sub(regex_pattern, '\\g<1> source_\\g<7>', ns.model_sql) -%}            \n        {%- elif regex_name == 'from_table_1' -%}\n            {%- set ns.model_sql = re.sub(regex_pattern, '\\g<1> \\g<3>_\\g<7>', ns.model_sql) -%}     \n        {%- elif regex_name == 'from_table_2' -%}\n            {%- set ns.model_sql = re.sub(regex_pattern, '\\g<1> \\g<3>_\\g<7>_\\g<11>', ns.model_sql) -%} \n        {%- else -%}   \n            {%- set ns.model_sql = re.sub(regex_pattern, '\\g<1> \\g<3>', ns.model_sql) -%}         \n        {% endif %}\n\n    {%- endfor -%}\n\n{%- if from_list|length > 0 -%}\n\n{%- set model_import_ctes -%}\n\n    {%- for config_obj in config_list -%}\n\n    {%- set ns.model_sql = ns.model_sql|replace(config_obj[0], '') -%}\n\n{{ config_obj[0] }}\n\n{% endfor -%}\n\n    {%- for from_obj in from_list|unique|sort -%}\n\n{%- if loop.first -%}with {% else -%}{%- if leading_commas -%},{%- endif -%}{%- endif -%}{{ from_obj[0] }} as (\n\n    select * from {{ from_obj[1] }}\n    {%- if from_obj[2] == 'from_source' and from_list|length > 1 %} \n    -- CAUTION: It's best practice to create staging layer for raw sources\n    {%- elif from_obj[2] == 'from_table_1' or from_obj[2] == 'from_table_2' or from_obj[2] == 'from_table_3' %}\n    -- CAUTION: It's best practice to 
use the ref or source function instead of a direct reference\n    {%- elif from_obj[2] == 'from_var_1' or from_obj[2] == 'from_var_2' %}\n    -- CAUTION: It's best practice to use the ref or source function instead of a var\n    {%- endif %}\n  \n){%- if ((loop.last and does_raw_sql_contain_cte) or (not loop.last)) and not leading_commas -%},{%- endif %}\n\n{% endfor -%}\n\n{%- if does_raw_sql_contain_cte -%}\n    {%- if leading_commas -%}\n        {%- set replace_with = '\\g<1>,' -%}\n    {%- else -%}\n        {%- set replace_with = '\\g<1>' -%}\n    {%- endif -%}\n{{ re.sub(with_regex, replace_with, ns.model_sql, 1)|trim }}\n{%- else -%}\n{{ ns.model_sql|trim }}\n{%- endif -%}\n\n{%- endset -%}\n\n{%- else -%}\n\n{% set model_import_ctes = model_raw_sql %}\n\n{%- endif -%}\n\n{%- if execute -%}\n\n{{ print(model_import_ctes) }}\n{% do return(model_import_ctes) %}\n\n{% endif %}\n\n{% endmacro %}"
  },
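  {
    "path": "examples/generate_model_import_ctes_usage.sh",
    "content": "#!/bin/bash\n# Hypothetical usage sketch (illustrative only; not a file shipped with the package).\n# Prints a rewritten version of an existing model in which every ref/source/var/direct\n# table reference is pulled up into an import CTE at the top of the file, with CAUTION\n# comments on the risky patterns. \"customers\" is a placeholder model name.\ndbt run-operation generate_model_import_ctes \\\n  --args '{\"model_name\": \"customers\", \"leading_commas\": false}'\n"
  },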
  {
    "path": "macros/generate_model_yaml.sql",
    "content": "{% macro generate_column_yaml(column, model_yaml, column_desc_dict, include_data_types, parent_column_name=\"\") %}\n  {{ return(adapter.dispatch('generate_column_yaml', 'codegen')(column, model_yaml, column_desc_dict, include_data_types, parent_column_name)) }}\n{% endmacro %}\n\n{% macro default__generate_column_yaml(column, model_yaml, column_desc_dict, include_data_types, parent_column_name) %}\n    {% if parent_column_name %}\n        {% set column_name = parent_column_name ~ \".\" ~ column.name %}\n    {% else %}\n        {% set column_name = column.name %}\n    {% endif %}\n\n    {% do model_yaml.append('      - name: ' ~ column_name  | lower ) %}\n    {% if include_data_types %}\n        {% do model_yaml.append('        data_type: ' ~ codegen.data_type_format_model(column)) %}\n    {% endif %}\n    {% do model_yaml.append('        description: ' ~ (column_desc_dict.get(column.name | lower,'') | tojson)) %}\n    {% do model_yaml.append('') %}\n\n    {% if column.fields|length > 0 %}\n        {% for child_column in column.fields %}\n            {% set model_yaml = codegen.generate_column_yaml(child_column, model_yaml, column_desc_dict, include_data_types, parent_column_name=column_name) %}\n        {% endfor %}\n    {% endif %}\n    {% do return(model_yaml) %}\n{% endmacro %}\n\n{% macro generate_model_yaml(model_names=[], upstream_descriptions=False, include_data_types=True) -%}\n  {{ return(adapter.dispatch('generate_model_yaml', 'codegen')(model_names, upstream_descriptions, include_data_types)) }}\n{%- endmacro %}\n\n{% macro default__generate_model_yaml(model_names, upstream_descriptions, include_data_types) %}\n\n    {% set model_yaml=[] %}\n\n    {% do model_yaml.append('version: 2') %}\n    {% do model_yaml.append('') %}\n    {% do model_yaml.append('models:') %}\n\n    {% if model_names is string %}\n        {{ exceptions.raise_compiler_error(\"The `model_names` argument must always be a list, even if there is only one model.\") }}\n    {% else %}\n        {% for model in model_names %}\n            {% do model_yaml.append('  - name: ' ~ model | lower) %}\n            {% do model_yaml.append('    description: \"\"') %}\n            {% do model_yaml.append('    columns:') %}\n\n            {% set relation=ref(model) %}\n            {%- set columns = adapter.get_columns_in_relation(relation) -%}\n            {% set column_desc_dict =  codegen.build_dict_column_descriptions(model) if upstream_descriptions else {} %}\n\n            {% for column in columns %}\n                {% set model_yaml = codegen.generate_column_yaml(column, model_yaml, column_desc_dict, include_data_types) %}\n            {% endfor %}\n        {% endfor %}\n    {% endif %}\n\n{% if execute %}\n\n    {% set joined = model_yaml | join ('\\n') %}\n    {{ print(joined) }}\n    {% do return(joined) %}\n\n{% endif %}\n\n{% endmacro %}"
  },
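  {
    "path": "examples/generate_model_yaml_usage.sh",
    "content": "#!/bin/bash\n# Hypothetical usage sketch (illustrative only; not a file shipped with the package).\n# Prints a version: 2 YAML block for the listed models. Note that model_names must be\n# a list even for a single model (the macro raises a compiler error otherwise), and\n# upstream_descriptions copies column descriptions from direct parents.\n# \"customers\" and \"orders\" are placeholder model names.\ndbt run-operation generate_model_yaml \\\n  --args '{\"model_names\": [\"customers\", \"orders\"], \"upstream_descriptions\": true}'\n"
  },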
  {
    "path": "macros/generate_source.sql",
    "content": "{% macro get_tables_in_schema(schema_name, database_name=target.database, table_pattern='%', exclude='') %}\n\n    {% set tables=dbt_utils.get_relations_by_pattern(\n        schema_pattern=schema_name,\n        database=database_name,\n        table_pattern=table_pattern,\n        exclude=exclude\n    ) %}\n\n    {% set table_list= tables | map(attribute='identifier') %}\n\n    {{ return(table_list | sort) }}\n\n{% endmacro %}\n\n{% macro generate_source(schema_name, database_name=target.database, generate_columns=False, include_descriptions=False, include_data_types=True, table_pattern='%', exclude='', name=schema_name, table_names=None, include_database=False, include_schema=False, case_sensitive_databases=False, case_sensitive_schemas=False, case_sensitive_tables=False, case_sensitive_cols=False) %}\n    {{ return(adapter.dispatch('generate_source', 'codegen')(schema_name, database_name, generate_columns, include_descriptions, include_data_types, table_pattern, exclude, name, table_names, include_database, include_schema, case_sensitive_databases, case_sensitive_schemas, case_sensitive_tables, case_sensitive_cols)) }}\n{% endmacro %}\n\n{% macro default__generate_source(schema_name, database_name, generate_columns, include_descriptions, include_data_types, table_pattern, exclude, name, table_names, include_database, include_schema, case_sensitive_databases, case_sensitive_schemas, case_sensitive_tables, case_sensitive_cols) %}\n\n{% set sources_yaml=[] %}\n{% do sources_yaml.append('version: 2') %}\n{% do sources_yaml.append('') %}\n{% do sources_yaml.append('sources:') %}\n{% do sources_yaml.append('  - name: ' ~ name | lower) %}\n\n{% if include_descriptions %}\n    {% do sources_yaml.append('    description: \"\"' ) %}\n{% endif %}\n\n{% if database_name != target.database or include_database %}\n{% do sources_yaml.append('    database: ' ~ (database_name if case_sensitive_databases else database_name | lower)) %}\n{% endif %}\n\n{% if schema_name != name or include_schema %}\n{% do sources_yaml.append('    schema: ' ~ (schema_name if case_sensitive_schemas else schema_name | lower)) %}\n{% endif %}\n\n{% do sources_yaml.append('    tables:') %}\n\n{% if table_names is none %}\n{% set tables=codegen.get_tables_in_schema(schema_name, database_name, table_pattern, exclude) %}\n{% else %}\n{% set tables = table_names %}\n{% endif %}\n\n{% for table in tables %}\n    {% do sources_yaml.append('      - name: ' ~ (table if case_sensitive_tables else table | lower) ) %}\n    {% if include_descriptions %}\n        {% do sources_yaml.append('        description: \"\"' ) %}\n    {% endif %}\n    {% if generate_columns %}\n    {% do sources_yaml.append('        columns:') %}\n\n        {% set table_relation=api.Relation.create(\n            database=database_name,\n            schema=schema_name,\n            identifier=table\n        ) %}\n\n        {% set columns=adapter.get_columns_in_relation(table_relation) %}\n\n        {% for column in columns %}\n            {% do sources_yaml.append('          - name: ' ~ (column.name if case_sensitive_cols else column.name | lower)) %}\n            {% if include_data_types %}\n                {% do sources_yaml.append('            data_type: ' ~ codegen.data_type_format_source(column)) %}\n            {% endif %}\n            {% if include_descriptions %}\n                {% do sources_yaml.append('            description: \"\"' ) %}\n            {% endif %}\n        {% endfor %}\n            {% do sources_yaml.append('') %}\n\n    {% 
endif %}\n\n{% endfor %}\n\n{% if execute %}\n\n    {% set joined = sources_yaml | join ('\\n') %}\n    {{ print(joined) }}\n    {% do return(joined) %}\n\n{% endif %}\n\n{% endmacro %}\n"
  },
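  {
    "path": "examples/generate_source_usage.sh",
    "content": "#!/bin/bash\n# Hypothetical usage sketch (illustrative only; not a file shipped with the package).\n# Prints source YAML for every table in a schema; generate_columns adds a columns:\n# block per table by introspecting the warehouse. \"raw\" and \"jaffle_shop\" are\n# placeholder database/schema names.\ndbt run-operation generate_source \\\n  --args '{\"schema_name\": \"jaffle_shop\", \"database_name\": \"raw\", \"generate_columns\": true, \"include_descriptions\": true}'\n"
  },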
  {
    "path": "macros/generate_unit_test_template.sql",
    "content": "{% macro generate_unit_test_template(model_name, inline_columns=false) %}\n  {{ return(adapter.dispatch('generate_unit_test_template', 'codegen')(model_name, inline_columns)) }}\n{% endmacro %}\n\n{% macro default__generate_unit_test_template(model_name, inline_columns=false) %}\n\n    {%- set ns = namespace(depends_on_list = []) -%}\n\n    {%- if execute -%}\n\n    -- getting inputs and materialization info\n    {%- for node in graph.nodes.values()\n        | selectattr(\"resource_type\", \"equalto\", \"model\")\n        | selectattr(\"name\", \"equalto\", model_name) -%}\n        {%- set ns.depends_on_list = ns.depends_on_list + node.depends_on.nodes -%}\n        {%- set ns.this_materialization = node.config['materialized'] -%}\n    {%- endfor -%}\n\n    {%- endif -%}\n\n    -- getting the input columns\n    {%- set ns.input_columns_list = [] -%}\n    {%- for item in ns.depends_on_list -%}\n        {%- set input_columns_list = [] -%}\n        {%- set item_dict = codegen.get_resource_from_unique_id(item) -%}\n        {%- if item_dict.resource_type == 'source' %}\n            {%- set columns = adapter.get_columns_in_relation(source(item_dict.source_name, item_dict.identifier)) -%}\n        {%- else -%}\n            {%- set columns = adapter.get_columns_in_relation(ref(item_dict.alias)) -%}\n        {%- endif -%}\n        {%- for column in columns -%}\n            {{ input_columns_list.append(column.name|lower) }}\n        {%- endfor -%}\n        {{ ns.input_columns_list.append(input_columns_list) }}\n    {%- endfor -%}\n\n    -- getting 'this' columns\n    {% set relation_exists = load_relation(ref(model_name)) is not none %}\n    {% if relation_exists %}\n        {%- set ns.expected_columns_list = [] -%}\n        {%- set columns = adapter.get_columns_in_relation(ref(model_name)) -%}\n        {%- for column in columns -%}\n            {{ ns.expected_columns_list.append(column.name|lower) }}\n        {%- endfor -%}\n    {% endif %}\n\n    {%- set unit_test_yaml_template -%}\nunit_tests:\n  - name: unit_test_{{ model_name }}\n    model: {{ model_name }}\n{% if ns.this_materialization == 'incremental' %}\n    overrides:\n      macros:\n        is_incremental: true\n{% else -%}\n\n{%- endif %}\n    given: {%- if ns.depends_on_list|length == 0 and ns.this_materialization != 'incremental' %} []{% endif %}\n    {%- for i in range(ns.depends_on_list|length) -%}\n        {%- set item_dict = codegen.get_resource_from_unique_id(ns.depends_on_list[i]) -%}\n        {% if item_dict.resource_type == 'source' %}\n      - input: source(\"{{item_dict.source_name}}\", \"{{item_dict.identifier}}\")\n        rows:\n        {%- else %}\n      - input: ref(\"{{item_dict.alias}}\")\n        rows:\n        {%- endif -%}\n        {%- if inline_columns -%}\n            {%- set ns.column_string = '- {' -%}\n            {%- for column_name in ns.input_columns_list[i] -%}\n                {%- if not loop.last -%}\n                    {%- set ns.column_string = ns.column_string ~ column_name ~ ': , ' -%}\n                {%- else -%}\n                    {%- set ns.column_string = ns.column_string ~ column_name ~ ': }' -%}\n                {%- endif -%}\n            {% endfor %}\n        {%- else -%}\n            {%- set ns.column_string = '' -%}\n            {%- for column_name in ns.input_columns_list[i] -%}\n                {%- if loop.first -%}\n                    {%- set ns.column_string = ns.column_string ~ '- ' ~ column_name ~ ': ' -%}\n                {%- else -%}\n                    {%- set 
ns.column_string = ns.column_string ~ '\\n            ' ~ column_name ~ ': ' -%}\n                {%- endif -%}\n            {% endfor %}\n        {%- endif %}\n          {{ns.column_string}}\n    {%- endfor %}\n\n    {%- if ns.this_materialization == 'incremental' %}\n      - input: this\n        rows:\n        {%- if relation_exists -%}\n            {%- if inline_columns -%}\n                {%- set ns.column_string = '- {' -%}\n                {%- for column_name in ns.expected_columns_list -%}\n                    {%- if not loop.last -%}\n                        {%- set ns.column_string = ns.column_string ~ column_name ~ ': , ' -%}\n                    {%- else -%}\n                        {%- set ns.column_string = ns.column_string ~ column_name ~ ': }' -%}\n                    {%- endif -%}\n                {% endfor %}\n            {%- else -%}\n                {%- set ns.column_string = '' -%}\n                {%- for column_name in ns.expected_columns_list -%}\n                    {%- if loop.first -%}\n                        {%- set ns.column_string = ns.column_string ~ '- ' ~ column_name ~ ': ' -%}\n                    {%- else -%}\n                        {%- set ns.column_string = ns.column_string ~ '\\n            ' ~ column_name ~ ': ' -%}\n                    {%- endif -%}\n                {% endfor %}\n            {%- endif %}\n          {{ns.column_string}}\n        {%- endif %}\n    {%- endif %}\n\n    expect:\n      rows:\n        {%- if relation_exists -%}\n            {%- if inline_columns -%}\n                {%- set ns.column_string = '- {' -%}\n                {%- for column_name in ns.expected_columns_list -%}\n                    {%- if not loop.last -%}\n                        {%- set ns.column_string = ns.column_string ~ column_name ~ ': , ' -%}\n                    {%- else -%}\n                        {%- set ns.column_string = ns.column_string ~ column_name ~ ': }' -%}\n                    {%- endif -%}\n                {% endfor %}\n            {%- else -%}\n                {%- set ns.column_string = '' -%}\n                {%- for column_name in ns.expected_columns_list -%}\n                    {%- if loop.first -%}\n                        {%- set ns.column_string = ns.column_string ~ '- ' ~ column_name ~ ': ' -%}\n                    {%- else -%}\n                        {%- set ns.column_string = ns.column_string ~ '\\n          ' ~ column_name ~ ': ' -%}\n                    {%- endif -%}\n                {% endfor %}\n            {%- endif %}\n        {{ns.column_string}}\n    {%- endif -%}\n\n    {%- endset -%}\n\n    {% if execute %}\n\n        {{ print(unit_test_yaml_template) }}\n        {% do return(unit_test_yaml_template) %}\n\n    {% endif %}\n\n{% endmacro %}\n"
  },
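  {
    "path": "examples/generate_unit_test_template_usage.sh",
    "content": "#!/bin/bash\n# Hypothetical usage sketch (illustrative only; not a file shipped with the package).\n# Prints a unit_tests: YAML scaffold for one model, with a given: input per upstream\n# ref/source and empty rows to fill in; inline_columns=true emits each row as a\n# one-line mapping instead. \"customers\" is a placeholder model name.\ndbt run-operation generate_unit_test_template \\\n  --args '{\"model_name\": \"customers\", \"inline_columns\": false}'\n"
  },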
  {
    "path": "macros/helpers/helpers.sql",
    "content": "{# retrieve models directly upstream from a given model #}\n{% macro get_model_dependencies(model_name) %}\n    {% for node in graph.nodes.values() | selectattr('name', \"equalto\", model_name) %}\n        {{ return(node.depends_on.nodes) }}\n    {% endfor %}\n{% endmacro %}\n\n\n{# add to an input dictionary entries containing all the column descriptions of a given model #}\n{% macro add_model_column_descriptions_to_dict(resource_type, model_name, dict_with_descriptions={}) %}\n    {% if resource_type == 'source' %}\n        {# sources aren't part of graph.nodes #}\n        {% set nodes = graph.sources %}\n    {% else %}\n        {% set nodes = graph.nodes %}\n    {% endif %}\n    {% for node in nodes.values()\n        | selectattr('resource_type', 'equalto', resource_type)\n        | selectattr('name', 'equalto', model_name) %}\n        {% for col_name, col_values in node.columns.items() %}\n            {% do dict_with_descriptions.update( {col_name: col_values.description} ) %}\n        {% endfor %}\n    {% endfor %}\n    {{ return(dict_with_descriptions) }}\n{% endmacro %}\n\n{# build a global dictionary looping through all the direct parents models #}\n{# if the same column name exists with different descriptions it is overwritten at each loop #}\n{% macro build_dict_column_descriptions(model_name) %}\n    {% if execute %}\n        {% set glob_dict = {} %}\n        {% for full_model in codegen.get_model_dependencies(model_name) %}\n            {% do codegen.add_model_column_descriptions_to_dict(\n                full_model.split('.')[0], full_model.split('.')[-1], glob_dict\n            ) %}\n        {% endfor %}\n        {{ return(glob_dict) }}\n    {% endif %}\n{% endmacro %}\n\n{# build a list of models looping through all models in the project #}\n{# filter by directory or prefix arguments, if provided #}\n{% macro get_models(directory=None, prefix=None) %}\n    {% set model_names=[] %}\n    {% set models = graph.nodes.values() | selectattr('resource_type', \"equalto\", 'model') %}\n    {% if directory and prefix %}\n        {% for model in models %}\n            {% set model_path = \"/\".join(model.path.split(\"/\")[:-1]) %}\n            {% if model_path == directory and model.name.startswith(prefix) %}\n                {% do model_names.append(model.name) %}\n            {% endif %}\n        {% endfor %}\n    {% elif directory %}\n        {% for model in models %}\n            {% set model_path = \"/\".join(model.path.split(\"/\")[:-1]) %}\n            {% if model_path == directory %}\n                {% do model_names.append(model.name) %}\n            {% endif %}\n        {% endfor %}\n    {% elif prefix %}\n        {% for model in models if model.name.startswith(prefix) %}\n            {% do model_names.append(model.name) %}\n        {% endfor %}\n    {% else %}\n        {% for model in models %}\n            {% do model_names.append(model.name) %}\n        {% endfor %}\n    {% endif %}\n    {{ return(model_names) }}\n{% endmacro %}\n\n{% macro data_type_format_source(column) -%}\n  {{ return(adapter.dispatch('data_type_format_source', 'codegen')(column)) }}\n{%- endmacro %}\n\n{# format a column data type for a source #}\n{% macro default__data_type_format_source(column) %}\n    {% set formatted = codegen.format_column(column) %}\n    {{ return(formatted['data_type'] | lower) }}\n{% endmacro %}\n\n{% macro data_type_format_model(column) -%}\n  {{ return(adapter.dispatch('data_type_format_model', 'codegen')(column)) }}\n{%- endmacro %}\n\n{# format a column 
data type for a model #}\n{% macro default__data_type_format_model(column) %}\n    {% set formatted = codegen.format_column(column) %}\n    {{ return(formatted['data_type'] | lower) }}\n{% endmacro %}\n\n{# retrieve entire resource dictionary based on unique id #}\n{% macro get_resource_from_unique_id(resource_unique_id) %}\n    {% set resource_type = resource_unique_id.split('.')[0] %}\n    {% if resource_type == 'source' %}\n        {% set resource = graph.sources[resource_unique_id] %}\n    {% elif resource_type == 'exposure' %}\n        {% set resource = graph.exposure[resource_unique_id] %}\n    {% elif resource_type == 'metric' %}\n        {% set resource = graph.metrics[resource_unique_id] %}\n    {% else %}\n        {% set resource = graph.nodes[resource_unique_id] %}\n    {% endif %}\n    {{ return(resource) }}\n{% endmacro %}\n"
  },
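  {
    "path": "examples/get_models_usage.sql",
    "content": "{# Hypothetical usage sketch (illustrative only; not a file shipped with the package). #}\n{# get_models returns a list of model names rather than printing anything, so the usual #}\n{# pattern is a small wrapper macro that hands the list to generate_model_yaml, invoked #}\n{# with `dbt run-operation generate_marts_yaml`. The wrapper name and the 'marts' and   #}\n{# 'fct_' filter values are placeholders.                                                #}\n{% macro generate_marts_yaml() %}\n    {% set models_to_document = codegen.get_models(directory='marts', prefix='fct_') %}\n    {% do codegen.generate_model_yaml(model_names=models_to_document) %}\n{% endmacro %}\n"
  },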
  {
    "path": "macros/vendored/dbt_core/format_column.sql",
    "content": "{% macro format_column(column) -%}\n  {{ return(adapter.dispatch('format_column', 'codegen')(column)) }}\n{%- endmacro %}\n\n{# Vendored from: https://github.com/dbt-labs/dbt-adapters/blob/c7b12aee533184bad391a657d1753539d1dd496a/dbt/include/global_project/macros/relations/column/columns_spec_ddl.sql#L85-L89 #}\n{% macro default__format_column(column) -%}\n  {% set data_type = column.dtype %}\n  {% set formatted = column.column.lower() ~ \" \" ~ data_type %}\n  {{ return({'name': column.name, 'data_type': data_type, 'formatted': formatted}) }}\n{%- endmacro -%}\n\n{# Vendored from: https://github.com/dbt-labs/dbt-bigquery/blob/4d255b2f854d21d5d8871bdaa8d7ab47e7e863a3/dbt/include/bigquery/macros/utils/get_columns_spec_ddl.sql#L1-L5 #}\n{# But modified to handle https://github.com/dbt-labs/dbt-codegen/issues/190 #}\n{% macro bigquery__format_column(column) -%}\n  {% set data_type = column.data_type %}\n  {% if column.mode.lower() == \"repeated\" and column.dtype.lower() == \"record\" %}\n    {% set data_type = \"array\" %}\n  {% endif %}\n  {% set formatted = column.column.lower() ~ \" \" ~ data_type %}\n  {{ return({'name': column.name, 'data_type': data_type, 'formatted': formatted}) }}\n{%- endmacro -%}\n"
  },
  {
    "path": "packages.yml",
    "content": "packages:\n  - package: dbt-labs/dbt_utils\n    version: [\">=0.8.0\", \"<2.0.0\"]\n"
  },
  {
    "path": "run_test.sh",
    "content": "#!/bin/bash\n\necho `pwd`\ncd integration_tests\ncp ci/sample.profiles.yml profiles.yml\n\ndbt --warn-error deps --target $1 || exit 1\ndbt --warn-error run-operation create_source_table --target $1 || exit 1\ndbt --warn-error seed --target $1 --full-refresh || exit 1\ndbt --warn-error run --target $1 || exit 1\ndbt --warn-error test --target $1 || exit 1\n"
  },
  {
    "path": "supported_adapters.env",
    "content": "SUPPORTED_ADAPTERS=postgres,snowflake,redshift,bigquery\n"
  },
  {
    "path": "tox.ini",
    "content": "[tox]\nskipsdist = True\nenvlist = lint_all, testenv\n\n[testenv]\npassenv =\n    # postgres env vars\n    POSTGRES_HOST\n    POSTGRES_USER\n    DBT_ENV_SECRET_POSTGRES_PASS\n    POSTGRES_PORT\n    POSTGRES_DATABASE\n    POSTGRES_SCHEMA\n    # snowflake env vars\n    SNOWFLAKE_ACCOUNT\n    SNOWFLAKE_USER\n    DBT_ENV_SECRET_SNOWFLAKE_PASS\n    SNOWFLAKE_ROLE\n    SNOWFLAKE_DATABASE\n    SNOWFLAKE_WAREHOUSE\n    SNOWFLAKE_SCHEMA\n    # bigquery env vars\n    BIGQUERY_PROJECT\n    BIGQUERY_SCHEMA\n    BIGQUERY_KEYFILE_JSON\n    # redshift env vars\n    REDSHIFT_HOST\n    REDSHIFT_USER\n    DBT_ENV_SECRET_REDSHIFT_PASS\n    REDSHIFT_DATABASE\n    REDSHIFT_SCHEMA\n    REDSHIFT_PORT\n\n# Postgres integration tests for centralized dbt testing\n# run dbt commands directly, assumes dbt is already installed in environment\n[testenv:dbt_integration_postgres]\nchangedir = integration_tests\nallowlist_externals = \n    dbt\nskip_install = true\ncommands =\n    dbt --version\n    dbt --warn-error deps --target postgres\n    dbt --warn-error run-operation create_source_table --target postgres\n    dbt --warn-error seed --target postgres --full-refresh\n    dbt --warn-error run --target postgres\n    dbt --warn-error test --target postgres\n\n# snowflake integration tests for centralized dbt testing\n# run dbt commands directly, assumes dbt is already installed in environment\n[testenv:dbt_integration_snowflake]\nchangedir = integration_tests\nallowlist_externals = \n    dbt\nskip_install = true\ncommands =\n    dbt --version\n    dbt --warn-error deps --target snowflake\n    dbt --warn-error run-operation create_source_table --target snowflake\n    dbt --warn-error seed --target snowflake --full-refresh\n    dbt --warn-error run --target snowflake\n    dbt --warn-error test --target snowflake\n\n# bigquery integration tests for centralized dbt testing\n# run dbt commands directly, assumes dbt is already installed in environment\n[testenv:dbt_integration_bigquery]\nchangedir = integration_tests\nallowlist_externals = \n    dbt\nskip_install = true\ncommands =\n    dbt --version\n    dbt --warn-error deps --target bigquery\n    dbt --warn-error run-operation create_source_table --target bigquery\n    dbt --warn-error seed --target bigquery --full-refresh\n    dbt --warn-error run --target bigquery\n    dbt --warn-error test --target bigquery\n\n# redshift integration tests for centralized dbt testing\n# run dbt commands directly, assumes dbt is already installed in environment\n[testenv:dbt_integration_redshift]\nchangedir = integration_tests\nallowlist_externals = \n    dbt\nskip_install = true\ncommands =\n    dbt --version\n    dbt --warn-error deps --target redshift\n    dbt --warn-error run-operation create_source_table --target redshift\n    dbt --warn-error seed --target redshift --full-refresh\n    dbt --warn-error run --target redshift\n    dbt --warn-error test --target redshift\n"
  }
]