[
  {
    "path": ".codeclimate.yml",
    "content": "---\nengines:\n  duplication:\n    enabled: true\n    config:\n      languages:\n        javascript:\n          mass_threshold: 50\n\n  eslint:\n    enabled: true\n  fixme:\n    enabled: true\nratings:\n  paths:\n  - \"**.js\"\nexclude_paths:\n- config/\n- tests/\n- vendor/\n"
  },
  {
    "path": ".editorconfig",
    "content": "# EditorConfig helps developers define and maintain consistent\n# coding styles between different editors and IDEs\n# editorconfig.org\n\nroot = true\n\n\n[*]\nend_of_line = lf\ncharset = utf-8\ntrim_trailing_whitespace = true\ninsert_final_newline = true\nindent_style = space\nindent_size = 2\n\n[*.hbs]\ninsert_final_newline = false\n\n[*.{diff,md}]\ntrim_trailing_whitespace = false\n"
  },
  {
    "path": ".ember-cli",
    "content": "{\n  /**\n    Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript\n    rather than JavaScript by default, when a TypeScript version of a given blueprint is available.\n  */\n  \"isTypeScriptProject\": false\n}\n"
  },
  {
    "path": ".github/renovate.json5",
    "content": "{\n  \"$schema\": \"https://docs.renovatebot.com/renovate-schema.json\",\n  \"extends\": [\n    \"config:base\",\n    \":automergeLinters\",\n    \":automergeTesters\",\n    \":dependencyDashboard\",\n    \":maintainLockFilesWeekly\",\n    \":pinOnlyDevDependencies\",\n    \":prConcurrentLimitNone\",\n    \":semanticCommitsDisabled\",\n    \"github>Turbo87/renovate-config:automergeCaretConstraint\",\n    \"github>Turbo87/renovate-config:commitTopics\",\n    \"github>NullVoxPopuli/renovate:npm.json5\"\n  ],\n}\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    branches: [ master, main, 'v*' ]\n  pull_request:\n    branches: [ master, main ]\n\nconcurrency:\n  group: ci-${{ github.head_ref || github.ref }}\n  cancel-in-progress: true\n\njobs:\n  setup:\n    name: 'Setup'\n    runs-on: ubuntu-latest\n    timeout-minutes: 2\n    outputs:\n      matrix: ${{ steps.set-matrix.outputs.matrix }}\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions/setup-node@v6\n        with:\n          # This version is different, because we want newer node features\n          # so that we can skip pnpm install for this job\n          node-version: 24\n      - id: set-matrix\n        run: |\n          echo \"matrix=$(node ./node-tests/list.mjs)\" >> $GITHUB_OUTPUT\n\n  lint:\n    name: Lints\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n\n    steps:\n      - uses: actions/checkout@v6\n      - uses: pnpm/action-setup@v4\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: 'pnpm'\n      - run: pnpm install\n      - run: pnpm lint\n\n  test:\n    name: \"Ember | ${{ matrix.app.name }}\"\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n    strategy:\n      fail-fast: false\n      matrix:\n        app:\n          - { name: \"Broccoli (v1 Addon)\", dir: \".\", cmd: \"pnpm test:ember\" }\n          - { name: \"Broccoli (v1 App)\", dir: './test-apps/broccoli', cmd: 'pnpm test:ember' }\n          - { name: \"Webpack + Embroider 3 \", dir: './test-apps/embroider3-webpack', cmd: 'pnpm test:ember' }\n          - { name: \"Vite + Compat\", dir: './test-apps/vite-with-compat', cmd: 'pnpm build:tests && pnpm test:exam' }\n\n    steps:\n      - uses: actions/checkout@v6\n      - uses: pnpm/action-setup@v4\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: 'pnpm'\n      - run: pnpm install\n      - run: ${{ matrix.app.cmd }}\n        working-directory: ${{ matrix.app.dir }}\n\n  test-node:\n    name: \"Mocha | ${{ matrix.name }}\"\n    needs: [\"setup\"]\n    runs-on: ubuntu-latest\n    timeout-minutes: 30\n    strategy:\n      fail-fast: false\n      matrix: ${{fromJson(needs.setup.outputs.matrix)}}\n\n    steps:\n      - uses: actions/checkout@v6\n      - uses: pnpm/action-setup@v4\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: 'pnpm'\n      - run: pnpm install\n      - run: ${{ matrix.command }}\n\n  floating-dependencies:\n    name: \"Floating Dependencies\"\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n\n    steps:\n      - uses: actions/checkout@v6\n      - uses: pnpm/action-setup@v4\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: 'pnpm'\n      - run: pnpm install\n      - run: pnpm test:ember\n\n  try-scenarios:\n    name: \"Try: ${{ matrix.ember-try-scenario }}\"\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n    needs: test\n\n    strategy:\n      fail-fast: false\n      matrix:\n        ember-try-scenario:\n          - ember-lts-4.8\n          - ember-lts-4.12\n          - ember-release\n          - ember-beta\n          - ember-canary\n\n    steps:\n      - uses: actions/checkout@v6\n      - uses: pnpm/action-setup@v4\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: 'pnpm'\n      - run: pnpm install\n      - run: node_modules/.bin/ember try:one ${{ matrix.ember-try-scenario }} --skip-cleanup\n"
  },
  {
    "path": ".github/workflows/gh-pages.yml",
    "content": "name: Deploy\n\non:\n  push:\n    branches: [ master, main, 'v*' ]\n\nconcurrency:\n  group: gh-pages-${{ github.head_ref || github.ref }}\n  cancel-in-progress: true\n\n\njobs:\n  # Build job\n  build:\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n    steps:\n      - uses: actions/checkout@v6\n      - uses: pnpm/action-setup@v4\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: 'pnpm'\n      - run: pnpm install\n      - run: cd docs-app && pnpm docs:build\n\n      - name: Upload static files as artifact\n        id: deployment\n        uses: actions/upload-pages-artifact@v4\n        with:\n          path: docs-app/.vitepress/dist\n\n  deploy:\n    needs: build\n\n    permissions:\n      pages: write      # to deploy to Pages\n      id-token: write   # to verify the deployment originates from an appropriate source\n\n    # Deploy to the github-pages environment\n    environment:\n      name: github-pages\n      url: ${{ steps.deployment.outputs.page_url }}\n\n    # Specify runner + deployment step\n    runs-on: ubuntu-latest\n    steps:\n      - name: Deploy to GitHub Pages\n        id: deployment\n        uses: actions/deploy-pages@v4 # or specific \"vX.X.X\" version tag for this action\n"
  },
  {
    "path": ".github/workflows/plan-release.yml",
    "content": "name: Plan Release\non:\n  workflow_dispatch:\n  push:\n    branches:\n      - main\n      - master\n  pull_request_target: # This workflow has permissions on the repo, do NOT run code from PRs in this workflow. See https://securitylab.github.com/research/github-actions-preventing-pwn-requests/\n    types:\n      - labeled\n      - unlabeled\n\nconcurrency:\n  group: plan-release # only the latest one of these should ever be running\n  cancel-in-progress: true\n\njobs:\n  should-run-release-plan-prepare:\n    name: Should we run release-plan prepare?\n    runs-on: ubuntu-latest\n    outputs:\n      should-prepare: ${{ steps.should-prepare.outputs.should-prepare }}\n    steps:\n      - uses: release-plan/actions/should-prepare-release@v1\n        with:\n          ref: 'main'\n        id: should-prepare\n\n  create-prepare-release-pr:\n    name: Create Prepare Release PR\n    runs-on: ubuntu-latest\n    timeout-minutes: 5\n    needs: should-run-release-plan-prepare\n    permissions:\n      contents: write\n      issues: read\n      pull-requests: write\n    if: needs.should-run-release-plan-prepare.outputs.should-prepare == 'true'\n    steps:\n      - uses: release-plan/actions/prepare@v1\n        name: Run release-plan prepare\n        with:\n          ref: 'main'\n        env:\n          GITHUB_AUTH: ${{ secrets.GITHUB_TOKEN }}\n        id: explanation\n\n      - uses: peter-evans/create-pull-request@v8\n        name: Create Prepare Release PR\n        with:\n          commit-message: \"Prepare Release ${{ steps.explanation.outputs.new-version}} using 'release-plan'\"\n          labels: \"internal\"\n          sign-commits: true\n          branch: release-preview\n          title: Prepare Release ${{ steps.explanation.outputs.new-version }}\n          body: |\n            This PR is a preview of the release that [release-plan](https://github.com/embroider-build/release-plan) has prepared. To release you should just merge this PR 👍\n\n            -----------------------------------------\n\n            ${{ steps.explanation.outputs.text }}\n"
  },
  {
    "path": ".github/workflows/publish.yml",
    "content": "# For every push to the primary branch with .release-plan.json modified,\n# runs release-plan.\n\nname: Publish Stable\n\non:\n  workflow_dispatch:\n  push:\n    branches:\n      - main\n      - master\n    paths:\n      - '.release-plan.json'\n\nconcurrency:\n  group: publish-${{ github.head_ref || github.ref }}\n  cancel-in-progress: true\n\njobs:\n  publish:\n    name: \"NPM Publish\"\n    runs-on: ubuntu-latest\n    permissions:\n      contents: write\n      id-token: write\n      attestations: write\n\n    steps:\n      - uses: actions/checkout@v6\n      - uses: pnpm/action-setup@v4\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 22\n          registry-url: 'https://registry.npmjs.org'\n          cache: pnpm\n      - run: npm install -g npm@latest # ensure that the globally installed npm is new enough to support OIDC\n      - run: pnpm install --frozen-lockfile\n      - name: Publish to NPM\n        run: NPM_CONFIG_PROVENANCE=true pnpm release-plan publish\n        env:\n          GITHUB_AUTH: ${{ secrets.GITHUB_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: Release\n\non:\n  push:\n    tags:\n      - 'v*'\n\njobs:\n  release:\n    name: release\n    runs-on: ubuntu-latest\n\n    steps:\n      - uses: actions/checkout@v6\n      - uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          registry-url: 'https://registry.npmjs.org'\n\n      - run: yarn install\n      - run: yarn auto-dist-tag --write\n\n      - run: npm publish\n        env:\n          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}\n"
  },
  {
    "path": ".gitignore",
    "content": "# compiled output\n/dist/\n/acceptance-dist/\ndist-*/\ntest-execution-*.json\n/declarations/\n\n# dependencies\n/node_modules/\n\n# misc\n/connect.lock\n.log/\n/.env*\n/.pnp*\n/.eslintcache\n/coverage/\n/libpeerconnection.log\n/npm-debug.log*\n/testem.log\n/yarn-error.log\n\n# ember-try\n/.node_modules.ember-try/\n/npm-shrinkwrap.json.ember-try\n/package.json.ember-try\n/.nyc_output/\n/package-lock.json.ember-try\n/yarn.lock.ember-try\n\n# broccoli-debug\n/DEBUG/\n"
  },
  {
    "path": ".npmignore",
    "content": "# compiled output\n/dist/\n/tmp/\n\n# misc\n/.codeclimate.yml\n/.editorconfig\n/.ember-cli\n/.env*\n/.eslintcache\n/.eslintignore\n/.eslintrc.js\n/.git/\n/.github/\n/.gitignore\n/.prettierignore\n/.prettierrc.js\n/.stylelintignore\n/.stylelintrc.js\n/.template-lintrc.js\n/.travis.yml\n/.watchmanconfig\n/CHANGELOG.md\n/CONTRIBUTING.md\n/config/\n/ember-cli-build.js\n/node-tests/\n/RELEASE.md\n/testem*.js\n/tests/\n/yarn.lock\n/*.tgz\n.gitkeep\neslint.config.mjs\n\n# ember-try\n/.node_modules.ember-try/\n/npm-shrinkwrap.json.ember-try\n/package.json.ember-try\n/.nyc_output/\n/package-lock.json.ember-try\n/yarn.lock.ember-try\n"
  },
  {
    "path": ".prettierignore",
    "content": "# unconventional js\n/blueprints/*/files/\n\n# compiled output\n/dist/\ndocs-app/\ntest-apps/\nacceptance-dist/\nfailure-dist/\n\n# misc\n/coverage/\n!.*\n.*/\n\n# ember-try\n/.node_modules.ember-try/\n\n# Ignored when enabling prettier in CI\n# Changes are too much, and also not super functional\n*.yml\n*.yaml\n*.md\n*.html\n*.json\n"
  },
  {
    "path": ".prettierrc.js",
    "content": "'use strict';\n\nmodule.exports = {\n  overrides: [\n    {\n      files: '*.{js,ts}',\n      options: {\n        singleQuote: true,\n      },\n    },\n  ],\n};\n"
  },
  {
    "path": ".release-plan.json",
    "content": "{\n  \"solution\": {\n    \"ember-exam\": {\n      \"impact\": \"minor\",\n      \"oldVersion\": \"10.0.1\",\n      \"newVersion\": \"10.1.0\",\n      \"tagName\": \"latest\",\n      \"constraints\": [\n        {\n          \"impact\": \"minor\",\n          \"reason\": \"Appears in changelog section :rocket: Enhancement\"\n        },\n        {\n          \"impact\": \"patch\",\n          \"reason\": \"Appears in changelog section :memo: Documentation\"\n        }\n      ],\n      \"pkgJSONPath\": \"./package.json\"\n    }\n  },\n  \"description\": \"## Release (2025-12-19)\\n\\n* ember-exam 10.1.0 (minor)\\n\\n#### :rocket: Enhancement\\n* `ember-exam`\\n  * [#1489](https://github.com/ember-cli/ember-exam/pull/1489) Better vite support ([@bendemboski](https://github.com/bendemboski))\\n\\n#### :memo: Documentation\\n* `ember-exam`\\n  * [#1489](https://github.com/ember-cli/ember-exam/pull/1489) Better vite support ([@bendemboski](https://github.com/bendemboski))\\n  * [#1455](https://github.com/ember-cli/ember-exam/pull/1455) Update README.md with timeout help ([@apellerano-pw](https://github.com/apellerano-pw))\\n\\n#### Committers: 2\\n- Andrew Pellerano ([@apellerano-pw](https://github.com/apellerano-pw))\\n- Ben Demboski ([@bendemboski](https://github.com/bendemboski))\\n\"\n}\n"
  },
  {
    "path": ".watchmanconfig",
    "content": "{\n  \"ignore_dirs\": [\"dist\"]\n}\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\n## Release (2025-12-19)\n\n* ember-exam 10.1.0 (minor)\n\n#### :rocket: Enhancement\n* `ember-exam`\n  * [#1489](https://github.com/ember-cli/ember-exam/pull/1489) Better vite support ([@bendemboski](https://github.com/bendemboski))\n\n#### :memo: Documentation\n* `ember-exam`\n  * [#1489](https://github.com/ember-cli/ember-exam/pull/1489) Better vite support ([@bendemboski](https://github.com/bendemboski))\n  * [#1455](https://github.com/ember-cli/ember-exam/pull/1455) Update README.md with timeout help ([@apellerano-pw](https://github.com/apellerano-pw))\n\n#### Committers: 2\n- Andrew Pellerano ([@apellerano-pw](https://github.com/apellerano-pw))\n- Ben Demboski ([@bendemboski](https://github.com/bendemboski))\n\n## Release (2025-12-03)\n\n* ember-exam 10.0.1 (patch)\n\n#### :bug: Bug Fix\n* `ember-exam`\n  * [#1482](https://github.com/ember-cli/ember-exam/pull/1482) Read configFile From commandOptions ([@jrjohnson](https://github.com/jrjohnson))\n\n#### Committers: 1\n- Jon Johnson ([@jrjohnson](https://github.com/jrjohnson))\n\n## Release (2025-08-26)\n\n* ember-exam 10.0.0 (major)\n\n#### :boom: Breaking Change\n* `ember-exam`\n  * [#1430](https://github.com/ember-cli/ember-exam/pull/1430) Support vite ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n\n#### :bug: Bug Fix\n* `ember-exam`\n  * [#1450](https://github.com/ember-cli/ember-exam/pull/1450) Support cjs testem configs ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n\n#### :memo: Documentation\n* `ember-exam`\n  * [#1347](https://github.com/ember-cli/ember-exam/pull/1347) Update setup example for new qunit requirements ([@elwayman02](https://github.com/elwayman02))\n\n#### :house: Internal\n* `ember-exam`\n  * [#1449](https://github.com/ember-cli/ember-exam/pull/1449) Spit node-tests in to parallel jobs for easier retries (we have a very short testem timeout) ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1451](https://github.com/ember-cli/ember-exam/pull/1451) Delete test duplication and use symlinks instead ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1452](https://github.com/ember-cli/ember-exam/pull/1452) Remove extraneous command in CI workflow ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1448](https://github.com/ember-cli/ember-exam/pull/1448) Split out try scenarios in to real apps for easier debugging ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1443](https://github.com/ember-cli/ember-exam/pull/1443) Remove unused deps ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1442](https://github.com/ember-cli/ember-exam/pull/1442) Get rid of custom resolver form an older era of the blueprint ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1439](https://github.com/ember-cli/ember-exam/pull/1439) Update renovate-config (move to weekly) ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1441](https://github.com/ember-cli/ember-exam/pull/1441) Set base for pages deployment ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1440](https://github.com/ember-cli/ember-exam/pull/1440) Fix static files path for gh-pages deploy ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1435](https://github.com/ember-cli/ember-exam/pull/1435) Strict dep management settings + re-roll lockfile, remove addon-docs, add vitepress ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1434](https://github.com/ember-cli/ember-exam/pull/1434) Add prettier to lint, don't run lint with tests ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1431](https://github.com/ember-cli/ember-exam/pull/1431) Upgrade eslint / prettier ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n\n#### Committers: 2\n- Jordan Hawker ([@elwayman02](https://github.com/elwayman02))\n- [@NullVoxPopuli](https://github.com/NullVoxPopuli)\n\n## Release (2025-03-05)\n\nember-exam 9.1.0 (minor)\n\n#### :rocket: Enhancement\n* `ember-exam`\n  * [#1313](https://github.com/ember-cli/ember-exam/pull/1313) Use ember-exam with vite ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n\n#### :house: Internal\n* `ember-exam`\n  * [#1336](https://github.com/ember-cli/ember-exam/pull/1336) Update release-plan ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1333](https://github.com/ember-cli/ember-exam/pull/1333) Fix lints since eslint-plugin-ember was upgraded ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1332](https://github.com/ember-cli/ember-exam/pull/1332) Revert #1188 ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1330](https://github.com/ember-cli/ember-exam/pull/1330) Revert \"Update dependency ember-qunit to v9\" ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1329](https://github.com/ember-cli/ember-exam/pull/1329) Revert \"Update pnpm to v10.5.2\" ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1322](https://github.com/ember-cli/ember-exam/pull/1322) Setup Release plan ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1314](https://github.com/ember-cli/ember-exam/pull/1314) Convert to pnpm ([@NullVoxPopuli](https://github.com/NullVoxPopuli))\n  * [#1289](https://github.com/ember-cli/ember-exam/pull/1289) Add .codeclimate.yml to .npmignore ([@SergeAstapov](https://github.com/SergeAstapov))\n\n#### Committers: 2\n- Sergey Astapov ([@SergeAstapov](https://github.com/SergeAstapov))\n- [@NullVoxPopuli](https://github.com/NullVoxPopuli)\n\n\n\n## v9.0.0 (2023-12-29)\n\n#### :boom: Breaking Change\n* [#1125](https://github.com/ember-cli/ember-exam/pull/1125) Update ember to 5.5, drop Nodes below 18, drop Mocha support ([@andreyfel](https://github.com/andreyfel))\n\n#### :rocket: Enhancement\n* [#963](https://github.com/ember-cli/ember-exam/pull/963) Add preserveTestName CLI flag to remove partition and browser ([@tasha-urbancic](https://github.com/tasha-urbancic))\n\n#### :house: Internal\n* [#1127](https://github.com/ember-cli/ember-exam/pull/1127) Run node tests in CI ([@andreyfel](https://github.com/andreyfel))\n\n#### Committers: 2\n- Andrey Fel ([@andreyfel](https://github.com/andreyfel))\n- Natasha Urbancic ([@tasha-urbancic](https://github.com/tasha-urbancic))\n\n\n## v8.0.0 (2022-01-25)\n\n#### :boom: Breaking Change\n* [#769](https://github.com/ember-cli/ember-exam/pull/769) Drop support for Ember 3.19 and below ([@Turbo87](https://github.com/Turbo87))\n\n#### :house: Internal\n* [#840](https://github.com/ember-cli/ember-exam/pull/840) Upgrade `@embroider/*` packages to 1.0.0 ([@SergeAstapov](https://github.com/SergeAstapov))\n* [#745](https://github.com/ember-cli/ember-exam/pull/745) Upgrade eslint-plugin-ember from v8.9.1 to v10.5.8 ([@SergeAstapov](https://github.com/SergeAstapov))\n* [#813](https://github.com/ember-cli/ember-exam/pull/813) Use `assert.strictEqual()` instead of `assert.equal()` ([@Turbo87](https://github.com/Turbo87))\n* [#775](https://github.com/ember-cli/ember-exam/pull/775) Delete unused `herp-derp` component ([@Turbo87](https://github.com/Turbo87))\n* [#774](https://github.com/ember-cli/ember-exam/pull/774) Migrate dummy app templates to use angle bracket invocation syntax ([@Turbo87](https://github.com/Turbo87))\n* [#740](https://github.com/ember-cli/ember-exam/pull/740) CI: Enable Ember v4 scenarios again ([@Turbo87](https://github.com/Turbo87))\n* [#768](https://github.com/ember-cli/ember-exam/pull/768) Upgrade `ember-cli-addon-docs` dependency ([@Turbo87](https://github.com/Turbo87))\n* [#766](https://github.com/ember-cli/ember-exam/pull/766) CI: Disable failing `ember-release` scenario ([@Turbo87](https://github.com/Turbo87))\n* [#748](https://github.com/ember-cli/ember-exam/pull/748) Add eslint-plugin-qunit per latest addon blueprint ([@SergeAstapov](https://github.com/SergeAstapov))\n* [#744](https://github.com/ember-cli/ember-exam/pull/744) Update npmignore file ([@Turbo87](https://github.com/Turbo87))\n\n#### Committers: 3\n- Sergey Astapov ([@SergeAstapov](https://github.com/SergeAstapov))\n- Stephen Yeung ([@step2yeung](https://github.com/step2yeung))\n- Tobias Bieniek ([@Turbo87](https://github.com/Turbo87))\n\n\n## v7.0.1 (2021-11-02)\n#### :bug: Bug Fix\n* [#760](https://github.com/ember-cli/ember-exam/pull/760) Wait for all browser to completet beforer cleaning up StateManager([@step2yeung](https://github.com/step2yeung))\n* [#750](https://github.com/ember-cli/ember-exam/pull/750) Ember exam failing when browser ID not found, return 0([@step2yeung](https://github.com/step2yeung))\n\n#### :house: Internal\n* [#748](https://github.com/ember-cli/ember-exam/pull/748) Add eslint-plugin-qunit per latest addon blueprint  internal ([@SergeAstapov](https://github.com/SergeAstapov))\n* [#744](https://github.com/ember-cli/ember-exam/pull/744) Update npmignore file internal([@Turbo87](https://github.com/Turbo87))\n#### Committers: 4\n\n- Sergey Astapov ([@SergeAstapov](https://github.com/SergeAstapov))\n- Tobias Bieniek ([@Turbo87](https://github.com/Turbo87))\n- Stephen Yeung ([@step2yeung](https://github.com/step2yeung))\n\n## v7.0.0 (2021-10-22)\n\n#### :boom: Breaking Change\n* [#739](https://github.com/ember-cli/ember-exam/pull/739) Update `ember-auto-import` to v2.x ([@Turbo87](https://github.com/Turbo87))\n* [#690](https://github.com/ember-cli/ember-exam/pull/690) Drop support for Node 10 and upgrade deps ([@nlfurniss](https://github.com/nlfurniss))\n\n#### :bug: Bug Fix\n* [#688](https://github.com/ember-cli/ember-exam/pull/688) Fix embroider tests ([@nlfurniss](https://github.com/nlfurniss))\n\n#### :memo: Documentation\n* [#687](https://github.com/ember-cli/ember-exam/pull/687) Update README.md: Fix typo in flag name ([@bantic](https://github.com/bantic))\n* [#644](https://github.com/ember-cli/ember-exam/pull/644) Docs: Fix information on Load Balancing ([@brkn](https://github.com/brkn))\n\n#### :house: Internal\n* [#743](https://github.com/ember-cli/ember-exam/pull/743) CI: Add `release` workflow ([@Turbo87](https://github.com/Turbo87))\n* [#737](https://github.com/ember-cli/ember-exam/pull/737) Use `prettier` to format JS files ([@Turbo87](https://github.com/Turbo87))\n* [#736](https://github.com/ember-cli/ember-exam/pull/736) CI: Disable Ember.js v4 scenarios ([@Turbo87](https://github.com/Turbo87))\n* [#689](https://github.com/ember-cli/ember-exam/pull/689) Set ember edition to Octane to quiet build logging ([@nlfurniss](https://github.com/nlfurniss))\n\n#### Committers: 4\n- Berkan Ünal ([@brkn](https://github.com/brkn))\n- Cory Forsyth ([@bantic](https://github.com/bantic))\n- Nathaniel Furniss ([@nlfurniss](https://github.com/nlfurniss))\n- Tobias Bieniek ([@Turbo87](https://github.com/Turbo87))\n\n\n## v6.1.0 (2021-02-17)\n\n#### :rocket: Enhancement\n* [#652](https://github.com/ember-cli/ember-exam/pull/652) Update to support `ember-qunit@5` ([@thoov](https://github.com/thoov))\n\n#### Committers: 1\n- Travis Hoover ([@thoov](https://github.com/thoov))\n\n\n## v6.0.1 (2020-10-28)\n\n#### :bug: Bug Fix\n* [#617](https://github.com/ember-cli/ember-exam/pull/617) Update @embroider/macros to fix ember-qunit@5.0.0-beta support. ([@rwjblue](https://github.com/rwjblue))\n\n#### :house: Internal\n* [#618](https://github.com/ember-cli/ember-exam/pull/618) Swap to GitHub actions for CI. ([@rwjblue](https://github.com/rwjblue))\n\n#### Committers: 2\n- Robert Jackson ([@rwjblue](https://github.com/rwjblue))\n- [@dependabot-preview[bot]](https://github.com/apps/dependabot-preview)\n\n\n## v6.0.0 (2020-10-12)\n\n#### :boom: Breaking Change\n* [#615](https://github.com/ember-cli/ember-exam/pull/615) Drop Node 13 support. ([@rwjblue](https://github.com/rwjblue))\n* [#600](https://github.com/ember-cli/ember-exam/pull/600) Drop Node 11 support. ([@thoov](https://github.com/thoov))\n\n#### :rocket: Enhancement\n* [#599](https://github.com/ember-cli/ember-exam/pull/599) Embroider support when `staticAddonTestSupportTrees` enabled ([@thoov](https://github.com/thoov))\n\n#### :bug: Bug Fix\n* [#410](https://github.com/ember-cli/ember-exam/pull/410) Fail if parallel is not a numeric value ([@step2yeung](https://github.com/step2yeung))\n\n#### :memo: Documentation\n* [#612](https://github.com/ember-cli/ember-exam/pull/612) Update README.md ([@jrowlingson](https://github.com/jrowlingson))\n* [#588](https://github.com/ember-cli/ember-exam/pull/588) Add note about `--random` and `--load-balance` ([@kellyselden](https://github.com/kellyselden))\n\n#### :house: Internal\n* [#614](https://github.com/ember-cli/ember-exam/pull/614) Update release automation setup. ([@rwjblue](https://github.com/rwjblue))\n* [#604](https://github.com/ember-cli/ember-exam/pull/604) Fixing bad yarn lock merge ([@thoov](https://github.com/thoov))\n* [#600](https://github.com/ember-cli/ember-exam/pull/600) Fix test suite to run mocha variants during CI ([@thoov](https://github.com/thoov))\n\n#### Committers: 6\n- Jack Rowlingson ([@jrowlingson](https://github.com/jrowlingson))\n- Kelly Selden ([@kellyselden](https://github.com/kellyselden))\n- Robert Jackson ([@rwjblue](https://github.com/rwjblue))\n- Stephen Yeung ([@step2yeung](https://github.com/step2yeung))\n- Travis Hoover ([@thoov](https://github.com/thoov))\n- [@dependabot-preview[bot]](https://github.com/apps/dependabot-preview)\n\n\nv5.0.1 / 2020-04-21\n===================\n* Bump fs-extra from 8.1.0 to 9.0.0 <dependabot[bot]>\n* Bump sinon from 7.5.0 to 9.0.2 <dependabot[bot]>\n* Bump cli-table3 from 0.5.1 to 0.6.0 <dependabot[bot]>\n* Bump ember-resolver from 6.0.2 to 8.0.0 <dependabot[bot]>\n* Bump ember-source from 3.17.2 to 3.18.0 <dependabot[bot]>\n* Bump semver from 7.1.3 to 7.3.2 <dependabot[bot]>\n* Bump eslint-plugin-node from 11.0.0 to 11.1.0 <dependabot[bot]>\n* Bump semver from 7.1.3 to 7.3.2 <dependabot[bot]>\n* Bump ember-template-lint from 2.4.1 to 2.5.2 <dependabot[bot]>\n* Bump mocha from 7.1.0 to 7.1.1 <dependabot[bot]>\n* Bump testdouble from 3.13.0 to 3.13.1 <dependabot[bot]>\n* Bump nyc from 15.0.0 to 15.0.1 <dependabot[bot]>\n* Bump ember-cli-htmlbars from 4.2.2 to 4.3.0 <dependabot[bot]>\n* Bump ember-cli from 3.16.0 to 3.17.0 <dependabot[bot]>\n* Bump ember-cli-babel from 7.18.0 to 7.19.0 <dependabot[bot]>\n* Bump ember-source from 3.17.1 to 3.17.2 <dependabot[bot]>\n* Bump ember-template-lint from 2.4.0 to 2.4.1  <dependabot[bot]>\n* Bump ember-source from 3.17.0 to 3.17.1 <dependabot[bot]>\n* Bump eslint-plugin-ember from 7.10.1 to 7.11.1 <dependabot[bot]>\n* Bump eslint-plugin-ember from 7.9.0 to 7.10.1 <dependabot[bot]>\n* Bump ember-template-lint from 2.3.0 to 2.4.0 <dependabot[bot]>\n\n\nv5.0.0 / 2020-03-06\n===================\n* [Enhancement] Update docs for ember-cli-addon-docs (@choheekim)\n* [Enhancement] Update node engine to be above 10 (@choheekim)\n* [Enhancement] Enables to execute completeBrowserHandler() when there is browser(s) failed to attach to server (@choheekim)\n* [Enhancement] _getTestFramework checks for ember-mocha package (@choheekim)\n* [Enhancement] updating header comments to fix warnings during \"ember build\" (@dcombslinkedin)\n* [BugFix] fix invalid ES module usage (@ef3)\n* Bump ember-source from 3.16.3 to 3.17.0 <dependabot[bot]>\n* Bump ember-template-lint from 1.14.0 to 2.3.0 <dependabot[bot]>\n* Bump eslint-plugin-ember from 7.8.1 to 7.9.0 <dependabot[bot]>\n* Bump testdouble from 3.12.5 to 3.13.0 <dependabot[bot]>\n* Bump mocha from 7.0.1 to 7.1.0 <dependabot[bot]>\n* Bump ember-template-lint from 1.13.2 to 1.14.0 <dependabot[bot]>\n* Bump ember-source from 3.14.3 to 3.16.3 <dependabot[bot]>\n* Bump semver from 7.1.2 to 7.1.3 <dependabot[bot]>\n* Bump ember-cli from 3.15.2 to 3.16.0 <dependabot[bot]>\n* Bump ember-cli-babel from 7.17.1 to 7.18.0 <dependabot[bot]>\n* Bump eslint-plugin-ember from 7.7.2 to 7.8.1 <dependabot[bot]>\n* Bump rimraf from 3.0.1 to 3.0.2 <dependabot[bot]>\n* Bump ember-cli-babel from 7.14.1 to 7.17.1 <dependabot[bot]>\n* Bump semver from 6.3.0 to 7.1.2 <dependabot[bot]>\n* Bump mocha from 6.2.2 to 7.0.1 <dependabot[bot]>\n* Bump ember-cli-babel from 7.13.2 to 7.14.1 <dependabot[bot]>\n* Bump rimraf from 3.0.0 to 3.0.1 <dependabot[bot]>\n* Bump ember-cli from 3.15.1 to 3.15.2 <dependabot[bot]>\n* Bump ember-cli-addon-docs-yuidoc from 0.2.3 to 0.2.4 <dependabot[bot]>\n* Bump ember-template-lint from 1.13.0 to 1.13.2 <dependabot[bot]>\n* Bump ember-cli-htmlbars from 4.2.1 to 4.2.2 <dependabot[bot]>\n* Bump ember-cli-htmlbars from 4.2.0 to 4.2.1 <dependabot[bot]>\n* Bump eslint-plugin-node from 10.0.0 to 11.0.0 <dependabot[bot]>\n* Bump nyc from 14.1.1 to 15.0.0 <dependabot[bot]>\n* Bump ember-resolver from 6.0.0 to 6.0.1 <dependabot[bot]>\n* Bump ember-template-lint from 1.11.1 to 1.12.1 <dependabot[bot]>\n* Bump eslint-plugin-ember from 7.7.1 to 7.7.2 <dependabot[bot]>\n* Bump ember-cli-babel from 7.13.0 to 7.13.2 <dependabot[bot]>\n* Bump ember-cli-htmlbars from 4.1.0 to 4.2.0 <dependabot[bot]>\n* Bump ember-try from 1.3.0 to 1.4.0 <dependabot[bot]>\n* Bump ember-cli-htmlbars from 4.0.9 to 4.1.0 <dependabot[bot]>\n* Bump ember-template-lint from 1.9.0 to 1.10.0 <dependabot[bot]>\n\n\nv4.0.9 / 2019-12-05\n===================\n* [Enhancement] Add a number of total tests, failed tests, passed tests, and skipped tests to a module metadata file (@choheekim)\n* [Enhancement] Update README.md corresponding to changes in the module metadata file contents (@choheekim)\n* [BugFix] Update yarn.lock to use latest version of core-js-compat (v.3.4.7) (@choheekim)\n* [BugFix] Fix process validation when registering callbacks for process.error & process.exit (@choheekim)\n* Bump ember-template-lint from 1.6.0 to 1.6.1 <dependabot[bot]>\n* Bump ember-qunit from 4.5.1 to 4.6.0 <dependabot[bot]>\n* Bump eslint-plugin-ember from 7.2.0 to 7.3.0 <dependabot[bot]>\n* Bump ember-load-initializers from 2.1.0 to 2.1.1 <dependabot[bot]>\n* Bump ember-template-lint from 1.6.1 to 1.8.1 <dependabot[bot]>\n* Bump ember-source from 3.13.3 to 3.14.1 <dependabot[bot]>\n* Bump chalk from 2.4.2 to 3.0.0 <dependabot[bot]>\n* Bump ember-export-application-global from 2.0.0 to 2.0.1 <dependabot[bot]>\n* Bump ember-template-lint from 1.8.1 to 1.8.2 <dependabot[bot]>\n* Bump ember-cli from 3.13.1 to 3.14.0 <dependabot[bot]>\n* Bump ember-resolver from 5.3.0 to 6.0.0  <dependabot[bot]>\n* Bump ember-cli-babel from 7.12.0 to 7.13.0 <dependabot[bot]>\n* Bump execa from 3.3.0 to 3.4.0 <dependabot[bot]>\n* Bump ember-source from 3.14.2 to 3.14.3 <dependabot[bot]>\n* Bump ember-template-lint from 1.8.2 to 1.9.0 <dependabot[bot]>\n* Bump ember-cli-htmlbars from 4.0.8 to 4.0.9 <dependabot[bot]>\n\n\nv4.0.5 / 2019-10-25\n===================\n* [BugFix] Validate process object is defined when registering event callbacks for process.error & process.exit (@choheekim)\n* [BugFix] Updates page title for dummy app to \"Ember Exam\" (@howie)\n* Bump rimraf from 2.7.1 to 3.0.0 <dependabot[bot]>\n* Bump ember-cli from 3.12.0 to 3.13.1 <dependabot[bot]>\n* Bump ember-cli-deploy-build from 1.1.1 to 2.0.0 <dependabot[bot]>\n* Bump mocha from 6.2.0 to 6.2.2 <dependabot[bot]>\n* Bump ember-template-lint from 1.5.3 to 1.6.0 <dependabot[bot]>\n* Bump ember-cli-babel from 7.11.1 to 7.12.0 <dependabot[bot]>\n* Bump ember-cli-inject-live-reload from 2.0.1 to 2.0.2 <dependabot[bot]>\n* Bump @ember/optional-features from 1.0.0 to 1.1.0  <dependabot[bot]>\n* Bump ember-cli-addon-docs from 0.6.14 to 0.6.15 <dependabot[bot]>\n* Bump ember-cli-htmlbars-inline-precompile from 2.1.0 to 3.0.1 <dependabot[bot]>\n* Bump ember-source from 3.10.2 to 3.13.3 <dependabot[bot]>\n* Bump eslint-plugin-node from 8.0.1 to 10.0.0  <dependabot[bot]>\n* Bump @ember/optional-features from 0.7.0 to 1.0.0 <dependabot[bot]>\n* Bump ember-cli-htmlbars from 3.1.0 to 4.0.8 <dependabot[bot]>\n* Bump eslint-plugin-ember from 6.10.1 to 7.2.0 <dependabot[bot]>\n\n\nv4.0.4 / 2019-09-30\n===================\n* [BugFix] Validate testem object is defined (@choheekim)\n* Bump ember-cli-babel from 7.11.0 to 7.11.1 <dependabot[bot]>\n\n\nv4.0.3 / 2019-09-24\n===================\n* [Feature] Introduce write-module-metadata-file (@choheekim)\n* Bump ember-resolver from 5.2.1 to 5.3.0 <dependabot[bot]>\n\n\nv4.0.2 / 2019-09-16\n===================\n* [BugFix] Ensure browserExitHandler is called for global errors (@step2yeung)\n* Bump ember-cli-deploy-git from 1.3.3 to 1.3.4 <dependabot[bot]>\n\n\nv4.0.1 / 2019-09-11\n===================\n* [Enhancement] Improve complete browser book keeping & improve request next module conditions (@step2yeung)\n* Bump sinon from 7.4.0 to 7.4.2 <dependabot[bot]>\n\n\nv4.0.0 / 2019-07-18\n===================\n* [Enhancement] Update to use node version >= 8 (@choheekim)\n* [Enhancement] Throw error when there are no matching tests with a given input by file-path and module-path (@choheekim)\n* [BugFix] Update yarn.lock to use v2.4.1 of ember-cli-addon-docs (@choheekim)\n* Bump ember-source from 3.10.1 to 3.10.2 <dependabot[bot]>\n* Bump eslint-plugin-ember from 6.6.0 to 6.7.0 <dependabot[bot]>\n* Bump semver from 6.1.1 to 6.1.2 <dependabot[bot]>\n* Bump testdouble from 3.12.0 to 3.12.2 <dependabot[bot]>\n\n\nv3.0.3 / 2019-06-18\n===================\n\n* [Feature] Introduce module-path-filter and test-file-path-filter in ember-exam (@choheekim)\n* Bump ember-source from 3.10.0 to 3.10.1 <dependabot[bot]>\n* Bump rsvp from 4.8.4 to 4.8.5 <dependabot[bot]>\n* Bump testdouble from 3.11.0 to 3.12.0 <dependabot[bot]>\n* Bump ember-cli-addon-docs from 0.6.11 to 0.6.13 <dependabot[bot]>\n* Bump ember-template-lint from 1.1.0 to 1.2.0 <dependabot[bot]>\n* Bump eslint-plugin-ember from 6.5.1 to 6.6.0 <dependabot[bot]>\n* Bump ember-cli-babel from 7.7.3 to 7.8.0 <dependabot[bot]>\n\n\nv3.0.2 / 
2019-06-03\n===================\n\n* [Enhancement] Update documentation (Add Table of Contents) (@Vasanth-freshworks)\n* [Enhancement] Allow graceful exit when async iterator fails to get a module. Add emberExamExitOnError flag to hard fail (@step2yeung)\n* [BugFix] Remove duplicate nav entry (@samselikoff)\n* Bump ember-cli-addon-docs from 0.6.8 to 0.6.9 <dependabot[bot]>\n* Bump mocha from 6.1.2 to 6.1.3 <dependabot[bot]>\n* Bump ember-cli-addon-docs from 0.6.9 to 0.6.10 <dependabot[bot]>\n* Bump sinon from 7.3.1 to 7.3.2 <dependabot[bot]>\n* Bump eslint-plugin-ember from 6.3.0 to 6.4.1 <dependabot[bot]>\n* Bump ember-source from 3.9.0 to 3.9.1 <dependabot[bot]>\n* [Security] Bump jquery from 3.3.1 to 3.4.0 <dependabot[bot]>\n* Bump nyc from 13.3.0 to 14.1.1 <dependabot[bot]>\n* Bump ember-source from 3.9.1 to 3.10.0 <dependabot[bot]>\n* Bump fs-extra from 7.0.1 to 8.0.1 <dependabot[bot]>\n* Bump mocha from 6.1.3 to 6.1.4 <dependabot[bot]>\n* Bump ember-cli-addon-docs from 0.6.10 to 0.6.11 <dependabot[bot]>\n* Bump semver from 6.0.0 to 6.1.0 <dependabot[bot]>\n* Bump eslint-plugin-ember from 6.4.1 to 6.5.0 <dependabot[bot]>\n* Bump ember-try from 1.1.0 to 1.2.1 <dependabot[bot]>\n* Bump semver from 6.1.0 to 6.1.1 <dependabot[bot]>\n* Bump eslint-plugin-ember from 6.5.0 to 6.5.1 <dependabot[bot]>\n\n\nv3.0.1 / 2019-04-09\n===================\n\n* [Enhancement] Update documentation (@step2yeung)\n\n\nv3.0.0 / 2019-04-08\n===================\n\n* [Feature - Breaking] Introduce TestLoadBalancing (@choheekim) & (@step2yeung)\n\nYou will need to **replace** the use of `start()` from `Ember-Qunit` or `Ember-Mocha` in `test-helper.js` with `start()` from `ember-exam`:\n\n```js\n// test-helper.js\nimport start from 'ember-exam/test-support/start';\n\n// Options passed to `start` will be passed-through to ember-qunit or ember-mocha\nstart();\n```\n\nThis breaking change was motivated by wanting to remove the monkey-patching, of ember-qunit and ember-mocha's test-loader, 
ember exam was doing.\n\n* [Bugfix] Ensure serialized test-execution browserId's are always treated as a string https://github.com/ember-cli/ember-exam/pull/233\n* [Bugfix] fix breaking change: https://github.com/ember-cli/ember-exam/pull/242 (@step2yeung)\n* [Enhancement] Prettify test-execution.json (@step2yeung)\n* Bump ember-qunit from 4.4.0 to 4.4.1 (4 weeks ago) <dependabot[bot]>\n* Bump ember-resolver from 5.1.2 to 5.1.3 (4 weeks ago) <dependabot[bot]>\n* Bump testdouble from 3.10.0 to 3.11.0 (4 weeks ago) <dependabot[bot]>\n* Bump ember-cli-babel from 7.4.3 to 7.5.0 (4 weeks ago) <dependabot[bot]>\n* Bump ember-resolver from 5.1.1 to 5.1.2 (5 weeks ago) <dependabot[bot]>\n* Bump mocha from 6.0.0 to 6.0.1 (5 weeks ago) <dependabot[bot]>\n* Bump ember-cli-babel from 7.4.2 to 7.4.3 (5 weeks ago) <dependabot[bot]>\n* Bump ember-qunit from 4.3.0 to 4.4.0 (5 weeks ago) <dependabot[bot]>\n* Bump mocha from 5.2.0 to 6.0.0 (5 weeks ago) <dependabot[bot]>\n* Bump ember-source from 3.7.3 to 3.8.0 (5 weeks ago) <dependabot[bot]>\n* Bump sinon from 7.2.3 to 7.2.4 (5 weeks ago) <dependabot[bot]>\n* Bump nyc from 13.2.0 to 13.3.0 (6 weeks ago) <dependabot[bot]>\n* [Security] Bump handlebars from 4.0.12 to 4.1.0 (6 weeks ago) <dependabot[bot]>\n* Bump ember-cli-babel from 7.4.1 to 7.4.2 (6 weeks ago) <dependabot[bot]>\n* Bump ember-source from 3.7.2 to 3.7.3 (7 weeks ago) <dependabot[bot]>\n* Bump ember-qunit from 4.2.0 to 4.3.0 (7 weeks ago) <dependabot[bot]>\n* Bump nyc from 13.1.0 to 13.2.0 (7 weeks ago) <dependabot[bot]>\n* Bump testdouble from 3.9.3 to 3.10.0 (7 weeks ago) <dependabot[bot]>\n* Bump ember-cli-babel from 7.4.0 to 7.4.1 (8 weeks ago) <dependabot[bot]>\n* Bump eslint-plugin-ember from 6.1.0 to 6.2.0 (8 weeks ago) <dependabot[bot]>\n\n\nv2.1.5 / 2019-04-08\n===================\n\n* re-release 2.0.3 as 2.1.5, as 2.0.4...2.1.4 introduced a worth-while but unexpected breaking change. 
2.0.4...2.1.4 will be re-released as 3.x\n\n\nv2.1.4 / 2019-03-27\n===================\n\n* [Bugfix] Ensure serialized test-execution browserId's are always treated as a string https://github.com/ember-cli/ember-exam/pull/233\n\nv2.1.3 / 2019-03-27\n===================\n\n* [Bugfix] fix breaking change: https://github.com/ember-cli/ember-exam/pull/242 (@step2yeung)\n* [Enhancement] Prettify test-execution.json (@step2yeung)\n\nv2.1.0 / 2019-03-27\n===================\n\n* [Feature] Introduce TestLoadBalancing <@choheekim> & <@step2yeung>\n* Bump ember-qunit from 4.4.0 to 4.4.1 (4 weeks ago) <dependabot[bot]>\n* Bump ember-resolver from 5.1.2 to 5.1.3 (4 weeks ago) <dependabot[bot]>\n* Bump testdouble from 3.10.0 to 3.11.0 (4 weeks ago) <dependabot[bot]>\n* Bump ember-cli-babel from 7.4.3 to 7.5.0 (4 weeks ago) <dependabot[bot]>\n* Bump ember-resolver from 5.1.1 to 5.1.2 (5 weeks ago) <dependabot[bot]>\n* Bump mocha from 6.0.0 to 6.0.1 (5 weeks ago) <dependabot[bot]>\n* Bump ember-cli-babel from 7.4.2 to 7.4.3 (5 weeks ago) <dependabot[bot]>\n* Bump ember-qunit from 4.3.0 to 4.4.0 (5 weeks ago) <dependabot[bot]>\n* Bump mocha from 5.2.0 to 6.0.0 (5 weeks ago) <dependabot[bot]>\n* Bump ember-source from 3.7.3 to 3.8.0 (5 weeks ago) <dependabot[bot]>\n* Bump sinon from 7.2.3 to 7.2.4 (5 weeks ago) <dependabot[bot]>\n* Bump nyc from 13.2.0 to 13.3.0 (6 weeks ago) <dependabot[bot]>\n* [Security] Bump handlebars from 4.0.12 to 4.1.0 (6 weeks ago) <dependabot[bot]>\n* Bump ember-cli-babel from 7.4.1 to 7.4.2 (6 weeks ago) <dependabot[bot]>\n* Bump ember-source from 3.7.2 to 3.7.3 (7 weeks ago) <dependabot[bot]>\n* Bump ember-qunit from 4.2.0 to 4.3.0 (7 weeks ago) <dependabot[bot]>\n* Bump nyc from 13.1.0 to 13.2.0 (7 weeks ago) <dependabot[bot]>\n* Bump testdouble from 3.9.3 to 3.10.0 (7 weeks ago) <dependabot[bot]>\n* Bump ember-cli-babel from 7.4.0 to 7.4.1 (8 weeks ago) <dependabot[bot]>\n* Bump eslint-plugin-ember from 6.1.0 to 6.2.0 (8 weeks ago) 
<dependabot[bot]>\n\nv2.0.3 / 2019-01-22\n===================\n\n* ignore .nyc_output\n\nv2.0.2 / 2019-01-22\n===================\n\n* Bump chalk from 2.4.1 to 2.4.2\n* Bump debug from 4.1.0 to 4.1.1\n* Bump ember-cli from 3.5.1 to 3.7.1\n* Bump ember-cli-babel from 7.1.4 to 7.4.0\n* Bump ember-cli-dependency-checker from 3.0.0 to 3.1.0\n* Bump ember-cli-htmlbars-inline-precompile from 2.0.0 to 2.1.0\n* Bump ember-qunit from 4.1.2 to 4.2.0\n* Bump ember-source from 3.6.0 to 3.7.2\n* Bump ember-template-lint from 0.8.23 to 1.1.0\n* Bump eslint-plugin-ember from 6.0.1 to 6.1.0\n* Bump eslint-plugin-node from 8.0.0 to 8.0.1\n* Bump rimraf from 2.6.2 to 2.6.3\n* Bump sinon from 7.1.1 to 7.2.3\n* Bump testdouble from 3.9.1 to 3.9.3\n* Run test:all to trigger ember & node test in ci, add missing single quote, and change number of tests running\n* `setResolver()` from `@ember/test-helpers`\n\nv2.0.1 / 2018-12-07\n===================\n\n  * ember-exam now sets `process.env.EMBER_EXAM_SPLIT_COUNT`, this allows testem scripts to pick up this configuration via `parallel: process.env.EMBER_EXAM_SPLIT_COUNT`\n\nv2.0.0 / 2018-12-04\n===================\n\n  * Bump Node support to: ^6.14.0 || ^8.10.0 || >= 10.*\n  * Update/Modernize all dependencies\n  * Update/Modernize codebase\n  * transition from ember-cli-qunit to ember-qunit\n\nv1.0.0 / 2017-11-02\n==================\n\n  * Remove auto-loading functionality\n  * Update readme to better emphasize explicit loading\n\nv0.8.1 / 2017-10-08\n==================\n\n  * Warn when auto-loading (deprecation)\n  * Remove `#` from test output.\n\nv0.8.0 / 2017-10-04\n==================\n\n  * Removed EMBER_TRY_SCENARIO's from .travis.yml file\n  * Fix ESLint warning\n  * Fix mocha integration\n  * Revert `npm install` command in .travis.yml\n  * Upgrade all dependencies version\n  * Upgrade Ember CLI to version 2.15 and align with default blueprint\n\nv0.7.2 / 2017-10-01\n==================\n\n  * fixes #109 - use 
local ember\n\nv0.7.1 / 2017-09-14\n==================\n\n  * Make notes about turning on parallelization more visible\n  * Move note on >= 0.7.0 into installation section\n  * Add installation instructions\n  * Remove jQuery usage\n  * Specify when to call loadEmberExam when using ember-cli-qunit@4\n  * fix version range\n  * Add release process notes\n\nv0.7.0 / 2017-06-01\n==================\n\n  * Document load API for version 0.7.0\n  * Fix eslint errors for node-land code\n  * Refactor core functionality\n  * Extract TestLoader mods into utility function\n  * Simplify and revamp code coverage\n  * Fix tests from ESLint migration\n  * Replace JSHint with ESLint\n  * Tweak CI configs\n  * Change ember try:one -> ember try:each\n  * Remove Node 0.12 from Travis\n  * Add Node LTS versions 4.x, 6.x, and stable to Travis\n\nv0.6.2 / 2017-04-09\n==================\n\n  * Downgrade split < 2 error to warning\n  * Fix mocha test commands\n\n\nv0.6.1 / 2017-03-25\n===================\n\n  * Ensure iterate exits with proper code\n  * Add Ember Exam video link to Readme\n  * Add note about using random with a seed\n  * Fix seed logging message for random option\n\nv0.6.0 / 2016-11-27\n===================\n\n  * Close code coverage gap\n  * Update README to include Mocha info\n  * Add framework-specific logic\n  * Run both Mocha and QUnit tests in CI\n  * Add tests for ember-cli-mocha\n  * Remove moduleForAcceptance\n  * Move QUnit-based tests to sub-directory\n  * Remove reliance on QUnit for handling url params\n\nv0.5.3 / 2016-11-19\n===================\n\n  * Fixed issue with using a single partition with a double digit\n\nv0.5.2 / 2016-11-15\n===================\n\n  * Support specifying multiple partitions (#63)\n\nv0.5.1 / 2016-11-14\n===================\n\n  * move rimraf to dependencies from devDependencies\n  * Add note about test splitting balancing\n\nv0.5.0 / 2016-08-14\n===================\n\n  * Document randomization-iterator\n  * Add tests for 
randomization-iterator\n  * Rename main acceptance test to be semantic\n  * Introduce exam:iterate command\n  * Tighten up npmignore\n  * Clarify README typos\n  * Increase mass threshold for code climate\n  * Improve acceptance test coverage\n  * Improve advanced configuration section of readme\n\nv0.4.6 / 2016-08-07\n===================\n\n  * Don't run Travis on non-master branches\n  * Read in testem config for constructing test page urls\n\nv0.4.5 / 2016-08-03\n===================\n\n  * Fix node tests after core-object changes\n  * Fix tests of ember-exam in 2.7\n  * Upgrade all deps to align with Ember 2.7.0.\n  * Temporarily undocument `--weighted`.\n  * Setup and document ember-try integration\n\nv0.4.4 / 2016-06-21\n===================\n\n  * Remove unused dependencies\n  * Make codeclimate and eslint configs local\n  * Make requires lazy where possible\n  * Remove unused Array utilities\n  * Add CodeClimate badges to README\n  * Setup Istanbul code coverage for node code\n  * Fix issues found via CodeClimate\n  * Fix Travis badge to point to master\n  * Add additional badges to README\n\nv0.4.3 / 2016-06-05\n===================\n\n  * Add Acceptance test for Testem output\n  * Add partition number to Testem output only when applicable\n  * Handle _split and _partition params as strings\n  * Fix typo, partition -> _partition\n\nv0.4.2 / 2016-06-02\n===================\n\n  * Introduce tests for TestLoader\n  * Add useful errors to TestLoader\n  * Don't fail when lint tests are disabled\n\nv0.4.1 / 2016-05-24\n===================\n\n  * Fix super callbacks context\n\nv0.4.0 / 2016-05-24\n===================\n\n  * Remove AST manipulations and refine API\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# How To Contribute\n\n## Installation\n\n- `git clone https://github.com/ember-cli/ember-exam.git`\n- `cd ember-exam`\n- `yarn install`\n\n## Linting\n\n- `yarn lint:hbs`\n- `yarn lint:js`\n- `yarn lint:js --fix`\n\n## Running tests\n\n- `yarn test:ember` – Runs the test suite on the current Ember version\n- `yarn test:ember --server` – Runs the test suite in \"watch mode\"\n- `yarn test:node` - Runs the node tests\n- `yarn test:all` – Runs the test suite against multiple Ember versions\n\n## Running the dummy application\n\n- `yarn start`\n- Visit the dummy application at [http://localhost:4200](http://localhost:4200).\n\nFor more information on using ember-cli, visit [https://cli.emberjs.com/release/](https://cli.emberjs.com/release/).\n\n## Debugging testem\n\nTerminal 1\n```bash\npnpm ember exam --load-balance --path ./dist --parallel 2 --testem-debug testem.log\n```\nTerminal 2\n```bash\ntail -f testem.log\n```\n"
  },
  {
    "path": "LICENSE.md",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2015\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# Ember Exam\n![Build Status](https://github.com/ember-cli/ember-exam/actions/workflows/ci.yml/badge.svg?event=push)\n[![NPM Version](https://badge.fury.io/js/ember-exam.svg)](https://badge.fury.io/js/ember-exam)\n[![Ember Observer Score](https://emberobserver.com/badges/ember-exam.svg)](https://emberobserver.com/addons/ember-exam)\n\nEmber Exam is an addon to allow you more control over how you run your tests when used in conjunction with [ember-qunit](https://github.com/emberjs/ember-qunit). It provides the ability to randomize, split, parallelize, and load-balance your test suite by adding a more robust CLI command.\n\nIt started as a way to help reduce flaky tests and encourage healthy test driven development. It's like [Head & Shoulders](http://www.headandshoulders.com/) for your tests!\n\n[![Introduction to Ember Exam](https://cloud.githubusercontent.com/assets/2922250/22800360/157ad67c-eed7-11e6-8d33-d2c59238c7f1.png)](https://embermap.com/video/ember-exam)\n\nThe [documentation website](https://ember-cli.github.io/ember-exam/) contains examples and API information.\n\n## Table of Contents\n\n- [Compatibility](#compatibility)\n- [Installation](#installation)\n- [How To Use](#how-to-use)\n  * [Version < `3.0.0`](#version--300)\n  * [Randomization](#randomization)\n    + [Randomization Iterator](#randomization-iterator)\n  * [Splitting](#splitting)\n    + [Split Test Parallelization](#split-test-parallelization)\n  * [Test Load Balancing](#test-load-balancing)\n      - [Test Failure Reproduction](#test-failure-reproduction)\n  * [Preserve Test Name](#preserve-test-name)\n- [Advanced Configuration](#advanced-configuration)\n  * [Ember Try & CI Integration](#ember-try--ci-integration)\n  * [Test Suite Segmentation](#test-suite-segmentation)\n  * [Exceeding Browser Timeout](#exceeding-browser-timeout)\n\n## Compatibility\n\n* Ember.js v4.8 or above\n* Ember CLI v4.8 or above\n* Node.js v18 or above\n\n## Installation\n\nInstallation is as easy as 
running:\n\n```bash\n$ npm install --save-dev ember-exam\n```\n\n## How To Use\n\nUsing Ember Exam is fairly straightforward as it extends directly from the default Ember-CLI `test` command. So, by default, it will work exactly the same as `ember test`.\n\n```bash\n$ ember exam\n$ ember exam --filter='acceptance'\n$ ember exam --server\n$ ember exam --load-balance --parallel=1\n```\n\nFor more information and examples, please visit the [documentation website](https://ember-cli.github.io/ember-exam/).\n```bash\n# A value of filter is acceptance\n$ ember exam --filter 'acceptance'\n\n# A value of parallel is 2\n$ ember exam --load-balance --parallel=2 --server\n\n# If a `=` is not used to pass a value to an option that requires a value, it will take anything passed after a space as it's value\n# In this instance, the value of parallel is --server\n$ ember exam --load-balance --parallel --server\n```\n\nThe idea is that you can replace `ember test` with `ember exam` and never look back.\n\nTo get the unique features of Ember Exam (described in-depth below), you will need to **replace** the use of `start()` from `ember-qunit` in `test-helper.js` with `start()` from `ember-exam`:\n\n```js\n// test-helper.js\n- import { start, setupEmberOnerrorValidation } from 'ember-qunit';\n+ import { setupEmberOnerrorValidation } from 'ember-qunit';\n+ import { start } from 'ember-exam/test-support';\n\n// Options passed to `start` will be passed-through to ember-qunit\nstart();\n```\n\n## How to use with Vite\n\nAll of the above applies, but we need to tell vite to build the app before telling ember/exam to run tests on that output.\n\nUpdate your test-helper.js to call the ember-exam `start` function:\n```diff\n  // ...\n  import { setApplication } from '@ember/test-helpers';\n  import { setup } from 'qunit-dom';\n- import { start as qunitStart, setupEmberOnerrorValidation } from 'ember-qunit';\n+ import { setupEmberOnerrorValidation } from 'ember-qunit';\n+ import { start as 
startEmberExam } from 'ember-exam/addon-test-support';\n\n- export function start() {\n+ export async function start(options) {\n    setApplication(Application.create(config.APP));\n\n    setup(QUnit.assert);\n    setupEmberOnerrorValidation();\n\n-   qunitStart();\n+   // Options passed to `start` will be passed-through to ember-qunit\n+   await startEmberExam(options);\n  }\n```\n\nor if you have a test-helper.ts:\n```diff\n  // ...\n  import { setApplication } from '@ember/test-helpers';\n  import { setup } from 'qunit-dom';\n- import { start as qunitStart, setupEmberOnerrorValidation } from 'ember-qunit';\n+ import { setupEmberOnerrorValidation } from 'ember-qunit';\n+ import {\n+   start as startEmberExam,\n+   type EmberExamStartOptions,\n+ } from 'ember-exam/addon-test-support';\n\n- export function start() {\n+ export async function start(options: EmberExamStartOptions) {\n    setApplication(Application.create(config.APP));\n\n    setup(QUnit.assert);\n    setupEmberOnerrorValidation();\n\n-   qunitStart();\n+   // Options passed to `start` will be passed-through to ember-qunit\n+   await startEmberExam(options);\n  }\n```\n\nThen, update your tests/index.html to pass availableModules to start:\n```html\n<script type=\"module\">\n  import { start } from './test-helper.js';\n\n  const availableModules = {\n    ...import.meta.glob('./application/**/*-test.{js,ts,gjs,gts}'),\n    ...import.meta.glob('./rendering/**/*-test.{js,ts,gjs,gts}'),\n    ...import.meta.glob('./unit/**/*-test.{js,ts,gjs,gts}'),\n  };\n\n\tstart({ availableModules });\n</script>\n```\n\n\nTesting development:\n```bash \nNODE_ENV=development vite build --mode development\nember exam --path dist --config-file ./testem.cjs\n```\n\nTesting production:\n```bash\nvite build --mode test\nember exam --path dist --config-file ./testem.cjs\n```\n\n> [!NOTE]\n> Specifying the `--path` is important because otherwise ember-cli will try to build your vite app, and it will error. 
\n\n> [!NOTE]\n> Specifying the `--config-file` is important because ember-cli (what backs ember-exam) doesn't know about cjs files. \n\n\n### Version < `3.0.0`\n\n\nPrior to `2.1.0`, Ember Exam must be loaded by importing `addon-test-support/load.js` and calling `loadEmberExam`:\n\n```js\n// test-helper.js\nimport loadEmberExam from 'ember-exam/test-support/load';\n\nloadEmberExam();\n\n```\n\n### Randomization\n\n```bash\n$ ember exam --random[=<seed>]\n```\n\nThe `random` option allows you to randomize the order in which your tests run. You can optionally specify a \"seed\" value from which to randomize your tests in order to reproduce results. The seed can be any string value. Regardless of whether you specify a seed or not, Ember Exam will log the seed value used for the randomization at the beginning of the test run:\n\n```bash\n$ ember exam --random\n$ Randomizing tests with seed: liv5d1ixkco6qlatl6o7mbo6r\n\n$ ember exam --random=this_is1337\n$ Randomizing tests with seed: this_is1337\n```\n\nIf you use `random` without specifying a seed, it must be the last argument you pass. Otherwise, Ember Exam will attempt to interpret any following arguments as the seed value. In other words:\n\n```bash\n# don't do this\nember exam --random --split=2\nRandomizing tests with seed: --split=2 # this is not what we wanted\n\n# do this instead\nember exam --split=2 --random\nRandomizing tests with seed: hwr74nkk55vzpvi\n```\n\n_Note: You must be using QUnit version `1.23.0` or greater for this feature to work properly._\n\n#### Randomization Iterator\n\nRandomization can be helpful for identifying non-atomic or order-dependent tests. To that end, Ember Exam provides an iterator to make it easy to test lots of variations in your test suite order quickly.\n\n```bash\n$ ember exam:iterate <num>\n```\n\nThis command will build your application once, and then run the test suite with the `random` option for the specified number of iterations. 
You can optionally skip the build by using a previous build via the `path` option:\n\n```bash\n$ ember exam:iterate <num> --path <build-path>\n```\n\nFinally, you can pass additional options through to the exam command used to run the tests via the `options` flag:\n\n```bash\n$ ember exam:iterate <num> --options <options>\n```\n\nThe `options` should be a string matching what you would use via the CLI.\n\n### Generating Module Metadata File For Test Execution\n\n```bash\n$ ember exam --write-module-metadata-file\n$ ember exam --wmmf\n```\n\nThe `--write-module-metadata-file`, `wmmf` as an alias, allows you to generate a module metadata file after a test run. The file provides metadata about the test modules executed.\n\nIt creates a json file, `module-metadata-<timestamp>.json`, which contains an array of elements representing metadata of modules executed, sorted in ascending order:\n```json\n[\n  {\n    \"moduleName\": \"Module-name\",\n    \"total\": \"Total number of tests in the module\",\n    \"passed\": \"A number of passed tests in the module\",\n    \"failed\": \"A number of failed tests in the module\",\n    \"skipped\": \"A number of skipped tests in the module\",\n    \"duration\": \"ms in Total duration to execute the module\",\n    \"failedTests\": \"A list of failed tests\"\n  }\n]\n```\n\nand it looks something like below:\n```json\n[\n  {\n    \"moduleName\": \"Slowest-module\",\n    \"total\": 12,\n    \"passed\": 9,\n    \"failed\": 1,\n    \"skipped\": 2,\n    \"duration\": 153,\n    \"failedTests\": [\"failed-test-1\"]\n  },\n  {\n    \"moduleName\": \"Fastest-module\",\n    \"total\": 2,\n    \"passed\": 1,\n    \"failed\": 0,\n    \"skipped\": 0,\n    \"duration\": 123,\n    \"failedTests\": []\n  }\n]\n```\n\n\n### Splitting\n\n```bash\n$ ember exam --split=<num>\n```\n\nThe `split` option allows you to specify the number of partitions greater than one to spread your tests across. 
Ember Exam will then proceed to run the first batch of tests.\n\n```bash\n$ ember exam --split=<num> --partition=<num>\n```\n\nThe `partition` option allows you to specify which test group to run after using the `split` option. It is one-indexed, so if you specify a split of 3, the last group you could run is 3 as well. You can also run multiple partitions, e.g.:\n\n```bash\n$ ember exam --split=4 --partition=1 --partition=2\n```\n\n_Note: Ember Exam splits tests by modifying the ember-qunit's `TestLoader` to bucket each test file into a partition, where each partition has an even number of test files. This makes it possible to have unbalanced partitions. To run your tests with balanced partitions, consider using `--load-balance`. For more info, see [_Test Load Balancing_](#test-load-balancing).\n\n#### Split Test Parallelization\n\n```bash\n$ ember exam --split=<num> --parallel\n```\n\nThe `parallel` option allows you to run your split tests across multiple test pages in parallel in [Testem](https://github.com/testem/testem). It will use a separate browser instance for each group of tests. So, if you specify a split of 3, then 3 browser instances will be spawned with the output looking something like:\n\n```bash\nok 1 PhantomJS 1.9 - Exam Partition 1 - some test\nok 2 PhantomJS 1.9 - Exam Partition 3 - some other other test\nok 3 PhantomJS 1.9 - Exam Partition 2 - some other test\n```\n\nYou can also combine the `parallel` option with the `partition` option to split tests, and then recombine partitions into parallel runs. 
This would, for example, allow you to run tests in multiple CI containers and have each CI container parallelize its list of tests.\n\nFor example, if you wanted to run your tests across two containers, but have one of them run twice as many tests as the other, and run them in parallel, you could do this:\n\n```bash\n# container 1\nember exam --split=3 --partition=1,2 --parallel\n```\n\n```bash\n# container 2\nember exam --split=3 --partition=3 --parallel\n```\n\n**Note 1**: _Ember Exam will respect the `parallel` setting of your [Testem config file](https://github.com/testem/testem/blob/master/docs/config_file.md#config-level-options) while running tests in parallel. The default value for `parallel` in Testem is 1, which means you'll need a non-default value to actually see parallel behavior._\n\n**Note 2**: _Ember Exam sets `process.env.EMBER_EXAM_SPLIT_COUNT` for convenience. You can use this in your Testem file._\n\n**Note 3**: _You must be using Testem version `1.5.0` or greater for this feature to work properly._\n\n### Filtering\n\nEmber Exam provides options to filter test suites by two types - module path and test file path.\n\n```bash\n$ ember exam --module-path=<module-path>\n```\n\nThe `module-path` option allows you to filter module paths by a given value. Module paths are mapped by test files and they are generated during `ember build`. After the build, `tests.js` file is created and it resides under <build-directory>/assets. The file is combined of all tests in an application and it has a form of `define(\"<module-path>\", others..`.\n\nThe value for `module-path` can have either string or regular expression, for instance:\n\n```bash\n# When module path value is string. This will run all modules which match with the passed value\n$ ember exam --module-path='dummy/tests/helpers/module-for-acceptance'\n\n# When module path value is regex. 
This will run all modules which have `dummy` in it\n$ ember exam --module-path='!/dummy/'\n```\n\nThe `file-path` option is to filter tests by *test file path*. The test file path is a location of the test file in a file system. You can specify `file-path` to a location of specific test file path or you can use wildcards in paths to target multiple test files.\n\n```bash\n# This will run tests that are defined in `/my-application/tests/unit/my-test.js`\n$ ember exam --file-path='/my-application/tests/unit/my-test.js'\n\n# This will run all test files that are under `/my-application/tests/unit/`\n$ ember exam --file-path='/my-application/tests/unit/*.js'\n```\n\n### Test Load Balancing\n\n```bash\n$ ember exam --parallel=<num> --load-balance\n```\n\nThe `load-balance` option allows you to load balance test files against multiple browsers. It will order the test files by test types, e.g. acceptance | integration | unit, and load balance the ordered test files between the browsers dynamically rather than statically.\n**Note:** parallel must be used along with load-balance to specify a number of browser(s)\n\nThe `load-balance` option was added to version 1.1 to address execution performance when running against a large test suite.\n\nWeb browsers and the testem server communicate via promise in order to send and receive test file. 
The promise timeout value is set to 15 seconds, and is configurable by adding `asyncTimeout=[timeout]` as a querystring param in the test URL or adding to the `test_page` option in the testem config.\nFor example, if you specify `load-balance` and `parallel` equals 3, then three browser instances will be created and the output will look something like:\n\n```bash\n# ember exam --parallel=3 --load-balance\nok 1 Chrome 66.0 - Browser Id 1 - some test\nok 2 Chrome 66.0 - Browser Id 2 - some another test\nok 3 Chrome 66.0 - Browser Id 3 - some the other test\n```\n\nYou can also specify the `split` and `partition` options with `load-balance` to load a portion of test modules on multiple CI containers.\n\n```bash\n$ ember exam --split=<num> --partition=<num> --parallel=<num> --load-balance\n```\n\nThis command will split test files and load-balance tests from the specified partition across the browsers. For example `ember exam --split=2 --partition=1 --parallel=3 --load-balance`, the complete list of test files are split into two halves. With the first half of the list load balanced against three browsers. The output will look something like below:\n\n```bash\n# ember exam --split=2 --partition=1 --parallel=3 --load-balance\nok 1 Chrome 66.0 - Exam Partition 1 - browser Id 1 - some test\nok 2 Chrome 66.0 - Exam Partition 1 - browser Id 2 - another test\nok 3 Chrome 66.0 - Exam Partition 1 - browser Id 3 - some the other test\n```\n\n\n**Important information on Load Balancing**\n\n1. The `--load-balance` option is currently only supported in CI mode and for that reason no-launch cannot be used with load-balance.\n2. You must be using `ember-cli` version 3.2.0 or greater for load balancing and test failure reproduction features to work properly.\n3. You must be using `ember-qunit` version 4.1.1 or greater for this feature to work properly.\n4. 
You must be using `qunit` version 2.13.0 or greater for this feature to work properly.\n\n##### Test Failure Reproduction\n\nDue to the dynamic nature of the load-balance option, test file execution order can vary between runs. In order to reproduce a past test execution, the execution must be recorded via passing --write-execution-file or --wef, which allows generating a JSON file that enables rerunning the past test execution. The option is only allowed when load-balance is passed.\n\n```bash\n# The command will load in test balanced mode with <num> of browser(s). After the test suite execution, it will generate a test-execution json file.\n$ ember exam --parallel=<num> --load-balance --wef\n$ ember exam --parallel=<num> --load-balance --write-execution-file\n```\n\nThe file is stored in the root directory and the naming structure is `test-execution-<timestamp>.json`.\nTo replay the test execution for particular browser(s), do the following:\n\n```bash\n# The command will read a test execution file specified for `replay-execution` and execute a browser Id(s) from `replay-browser`\n$ ember exam --replay-execution=[string] --replay-browser=[num]\n```\n\n`replay-execution` allows you to specify a path to the json file to run execution against and `replay-browser` is to specify browser ID(s) to execute.\n\n```bash\n# The command will read test-execution-000000.json and load the list of modules mapped to browserId 1\n$ ember exam --replay-execution=test-execution-000000.json --replay-browser=1\n```\n\nThe above command will read `test-execution-000000.json` and load the list of modules which is mapped by browser ID #1.\n\n`replay-browser` can be an array of browser IDs. 
For instance `--replay-browser=1,2` will start two browsers and execute a list of modules which were previously run by browsers #1 and #2.\n\n```bash\n# The command will read test-execution-000000.json and load the list of module mapped to browserId 1 and 2\n$ ember exam --replay-execution=test-execution-000000.json --replay-browser=1,2\n```\n\nWhen `replay-browser` value is not specified it will execute browserId(s) read from `failedBrowser` in the test execution file.\n\n```bash\n# The command will read test-execution-000000.json and load the list of modules mapped to browserIds from failedBrowser in the json file.\n$ ember exam --replay-execution=test-execution-000000.json\n```\n\nWhen `replay-browser` value is not specified and there is no value for `failedBrowser` in the json file it will rerun the full list of modules.\n\n```bash\n# The command will read test-execution-000000.json and load the list of module mapped to all browserIds when failedBrowser is none in the json file\n$ ember exam --replay-execution=test-execution-000000.json\n```\n\n**Important information on `--replay-execution` and `--replay-browser`**\n\n1. You must be using `ember-cli` version 3.2.0 or greater for load-balance and test failure reproduction features to work properly.\n2. You must be using `ember-qunit` version 4.1.1 or greater for this feature to work properly.\n3. 
You must be using `qunit` version 2.8.0 or greater for this feature to work properly.\n\n#### Preserve Test Name\n\nWhen using `--split` and/or `--load-balance` the output will look something like:\n\n```bash\n# ember exam --split=2 --partition=1 --parallel=3 --load-balance\nok 1 Chrome 66.0 - Exam Partition 1 - browser Id 1 - some test\nok 2 Chrome 66.0 - Exam Partition 1 - browser Id 2 - another test\nok 3 Chrome 66.0 - Exam Partition 1 - browser Id 3 - some the other test\n```\nHowever, if you change the amount of parallelization, or randomize across partitions, the output will change for the same test, which may be an issue if you are tracking test insights over time.\n\n```bash\n# ember exam --split=2 --partition=1 --parallel=2 --load-balance\nok 1 Chrome 66.0 - Exam Partition 1 - browser Id 2 - some test\nok 2 Chrome 66.0 - Exam Partition 1 - browser Id 1 - another test\nok 3 Chrome 66.0 - Exam Partition 1 - browser Id 2 - some the other test\n```\nYou can add `--preserve-test-name` to remove the dynamic segments of the output (partition and browser) to ensure the output test names are always the same.\n\n```bash\n# ember exam --split=2 --partition=1 --parallel=3 --load-balance --preserve-test-name\nok 1 Chrome 66.0 - some test\nok 2 Chrome 66.0 - another test\nok 3 Chrome 66.0 - some the other test\n```\n\n## Advanced Configuration\n\nEmber Exam does its best to allow you to run your test suite in a way that is effective for your individual needs. To that end, there are lots of advanced ways to configure your setup by integrating with other aspects of the Ember testing environment. The following sections will cover a few of the more common scenarios.\n\n### Ember Try & CI Integration\n\nIntegrating ember-exam with [ember-try](https://github.com/ember-cli/ember-try) is remarkably easy. 
Define a [`command` in your `ember-try.js` config](https://github.com/ember-cli/ember-try#configuration-files) that leverages the `exam` command:\n\n```js\n// config/ember-try.js\nmodule.exports = {\n  command: 'ember exam --split 3 --parallel',\n  // ...\n};\n```\n\nUsing [environmental variables](https://nodejs.org/api/process.html#process_process_env) gives you flexibility in how you run your tests. For instance, you could distribute your tests across processes instead of parallelizing them by specifying a `PARTITION` variable in your process environment and then consuming it like so:\n\n```js\nmodule.exports = {\n  command: 'ember exam --split 20 --partition ' + process.env.PARTITION,\n  // ...\n};\n```\n\nIf you are working with [Travis CI](https://travis-ci.org/) then you can also easily set up seeded-random runs based on PR numbers. Similar to the following:\n\n```js\nconst command = [ 'ember', 'exam', '--random' ];\nconst pr = process.env.TRAVIS_PULL_REQUEST;\n\nif (pr) {\n  command.push(pr);\n}\n\nmodule.exports = {\n  command: command.join(' '),\n  // ...\n};\n```\n\nYou can refer to [Travis' default environment variables](https://docs.travis-ci.com/user/environment-variables/#Default-Environment-Variables) to see what else you could possibly leverage for your test setup.\n\n### Test Suite Segmentation\n\nSome test suites like to segment which tests run based on various facets such as type of test, feature being tested, and so on. 
This can be accomplished by leveraging Testem's ability to have multiple test pages:\n\n```json\n{\n  \"test_page\": [\n    \"tests/index.html?filter=acceptance\",\n    \"tests/index.html?filter=!acceptance\"\n  ]\n}\n```\n\nYou can use this feature in conjunction with Ember Exam's features, which will allow you to segment your test suite but still gain benefits from randomization and splitting.\n\n### Exceeding Browser Timeout\n\nIf you have a lot of tests you may run into a timeout error, especially in CI environments with constrained resources.\n\n```\nError: Browser timeout exceeded: 10s\n```\n\nYou can work around this by increasing `browser_disconnect_timeout` in testem.js:\n\n```js\nmodule.exports = {\n  browser_disconnect_timeout: 30,\n};\n```\n"
  },
  {
    "path": "RELEASE.md",
    "content": "# Release Process\n\nReleases in this repo are mostly automated using [release-plan](https://github.com/embroider-build/release-plan/). Once you label all your PRs correctly (see below) you will have an automatically generated PR that updates your CHANGELOG.md file and a `.release-plan.json` that is used to prepare the release once the PR is merged.\n\n## Preparation\n\nSince the majority of the actual release process is automated, the remaining tasks before releasing are:\n\n- correctly labeling **all** pull requests that have been merged since the last release\n- updating pull request titles so they make sense to our users\n\nSome great information on why this is important can be found at [keepachangelog.com](https://keepachangelog.com/en/1.1.0/), but the overall\nguiding principle here is that changelogs are for humans, not machines.\n\nWhen reviewing merged PR's the labels to be used are:\n\n- breaking - Used when the PR is considered a breaking change.\n- enhancement - Used when the PR adds a new feature or enhancement.\n- bug - Used when the PR fixes a bug included in a previous release.\n- documentation - Used when the PR adds or updates documentation.\n- internal - Internal changes or things that don't fit in any other category.\n\n**Note:** `release-plan` requires that **all** PRs are labeled. If a PR doesn't fit in a category it's fine to label it as `internal`\n\n## Release\n\nOnce the prep work is completed, the actual release is straight forward: you just need to merge the open [Plan Release](https://github.com/ember-cli/ember-exam/pulls?q=is%3Apr+is%3Aopen+%22Prepare+Release%22+in%3Atitle) PR\n"
  },
  {
    "path": "addon-test-support/-private/async-iterator.js",
    "content": "'use strict';\n\nconst iteratorCompleteResponse = { done: true, value: null };\n\n/**\n * A class to iterate a sequencial set of asynchronous events.\n *\n * @class AsyncIterator\n */\nexport default class AsyncIterator {\n  constructor(testem, options) {\n    this._testem = testem;\n    this._request = options.request;\n    this._response = options.response;\n    this._done = false;\n    this._current = null;\n    this._boundHandleResponse = this.handleResponse.bind(this);\n    this._waiting = false;\n    // Set a timeout value from either url parameter or default timeout value, 15 s.\n    this._timeout = options.timeout || 15;\n    this._browserId = options.browserId;\n    this._emberExamExitOnError = options.emberExamExitOnError;\n\n    testem.on(this._response, this._boundHandleResponse);\n  }\n\n  /**\n   * Indicates whether the response queue is done or not.\n   *\n   * @method done\n   * @return {bool} whether the response queue is done or not\n   */\n  get done() {\n    return this._done;\n  }\n\n  /**\n   * @method toString\n   * @return {String} the stringified value of the iterator.\n   */\n  toString() {\n    return `<AsyncIterator (request: ${this._request} response: ${this._response})>`;\n  }\n\n  /**\n   * Handle a response when it's waiting for a response\n   *\n   * @method handleResponse\n   * @param {*} response\n   */\n  handleResponse(response) {\n    if (this._waiting === false) {\n      throw new Error(\n        `${this.toString()} Was not expecting a response, but got a response`,\n      );\n    } else {\n      this._waiting = false;\n    }\n\n    try {\n      if (response.done) {\n        this.dispose();\n      }\n      this._current.resolve(response);\n    } catch (e) {\n      this._current.reject(e);\n    } finally {\n      this._current = null;\n\n      if (this.timer) {\n        clearTimeout(this.timer);\n      }\n    }\n  }\n\n  /**\n   * Dispose when an iteration is finished.\n   *\n   * @method dispose\n   */\n  
dispose() {\n    this._done = true;\n    this._testem.removeEventCallbacks(\n      this._response,\n      this._boundHandleResponse,\n    );\n  }\n\n  /**\n   * Emit the current request.\n   *\n   * @method _makeNextRequest\n   */\n  _makeNextRequest() {\n    this._waiting = true;\n    this._testem.emit(this._request, this._browserId);\n  }\n\n  /**\n   * Set a timeout to reject a promise if it doesn't get response within the timeout threshold.\n   *\n   * @method _setTimeout\n   * @param {*} resolve\n   */\n  _setTimeout(resolve, reject) {\n    clearTimeout(this.timeout);\n    this.timer = setTimeout(() => {\n      if (!this._waiting) {\n        return;\n      }\n\n      if (this._emberExamExitOnError) {\n        let err = new Error(\n          `EmberExam: Promise timed out after ${this._timeout} s while waiting for response for ${this._request}`,\n        );\n        reject(err);\n      } else {\n        console.error(\n          `EmberExam: Promise timed out after ${this._timeout} s while waiting for response for ${this._request}. Closing browser to exit gracefully.`,\n        );\n        resolve(iteratorCompleteResponse);\n      }\n    }, this._timeout * 1000);\n  }\n\n  /**\n   * Gets the next response from the request and resolve the promise.\n   * if it's end of the iteration resolve the promise with done being true.\n   *\n   * @method next\n   * @return {Promise}\n   */\n  next() {\n    if (this._done) {\n      return Promise.resolve(iteratorCompleteResponse);\n    }\n    if (this._current) {\n      return this._current.promise;\n    }\n\n    let resolve, reject;\n    let promise = new Promise((_resolve, _reject) => {\n      resolve = _resolve;\n      reject = _reject;\n      this._setTimeout(resolve, reject);\n    });\n\n    this._current = {\n      resolve,\n      reject,\n      promise,\n    };\n\n    this._makeNextRequest();\n\n    return promise;\n  }\n}\n"
  },
  {
    "path": "addon-test-support/-private/ember-exam-test-loader.js",
    "content": "import { assert } from '@ember/debug';\nimport getUrlParams from './get-url-params';\nimport splitTestModules from './split-test-modules';\nimport weightTestModules from './weight-test-modules';\nimport { filterTestModules } from './filter-test-modules';\nimport { TestLoader } from 'ember-qunit/test-loader';\nimport AsyncIterator from './async-iterator';\nimport QUnit from 'qunit';\n\n/**\n * EmberExamQUnitTestLoader allows delayed requiring of test modules to enable test load balancing\n * It extends ember-qunit/test-loader used by `ember test`, since it overrides moduleLoadFailure()\n * to log a test failure when a module fails to load\n * @class EmberExamQUnitTestLoader\n * @extends {TestLoader}\n */\nexport default class EmberExamTestLoader extends TestLoader {\n  constructor(testem, urlParams, qunit = QUnit) {\n    super();\n    this._testModules = [];\n    this._testem = testem;\n    this._qunit = qunit;\n    this._urlParams = urlParams || getUrlParams();\n  }\n\n  get urlParams() {\n    return this._urlParams;\n  }\n\n  /**\n   * ember-cli-test-loader instantiates a new TestLoader instance and calls loadModules.\n   * EmberExamQUnitTestLoader does not support load() in favor of loadModules().\n   *\n   * @method load\n   */\n  static load() {\n    throw new Error(\"`EmberExamQUnitTestLoader` doesn't support `load()`.\");\n  }\n\n  /**\n   * require() collects the full list of modules before requiring each module with\n   * super.require(), instead of requiring and unseeing a module when each gets loaded.\n   *\n   * @method require\n   * @param {string} moduleName\n   */\n  require(moduleName) {\n    this._testModules.push(moduleName);\n  }\n\n  /**\n   * Make unsee a no-op to avoid any unwanted resets\n   *\n   * @method unsee\n   */\n  unsee() {}\n\n  /**\n   * Loads the test modules depending on the urlParam\n   *\n   * @method loadModules\n   */\n  async loadModules({ availableModules } = {}) {\n    const loadBalance = 
this._urlParams.get('loadBalance');\n    const browserId = this._urlParams.get('browser');\n    const modulePath = this._urlParams.get('modulePath');\n    const filePath = this._urlParams.get('filePath');\n    let partitions = this._urlParams.get('partition');\n    let split = parseInt(this._urlParams.get('split'), 10);\n\n    split = isNaN(split) ? 1 : split;\n\n    if (partitions === undefined) {\n      partitions = [1];\n    } else if (!Array.isArray(partitions)) {\n      partitions = [partitions];\n    }\n\n    if (!availableModules) {\n      super.loadModules();\n    } else {\n      assert(\n        `Available modules must be an object.`,\n        typeof availableModules === 'object',\n      );\n\n      this._availableModules = availableModules;\n      this._testModules = Object.keys(availableModules);\n    }\n\n    this.setupModuleMetadataHandler();\n\n    if (modulePath || filePath) {\n      this._testModules = filterTestModules(\n        this._testModules,\n        modulePath,\n        filePath,\n      );\n    }\n\n    if (loadBalance && this._testem) {\n      this.setupLoadBalanceHandlers();\n      this._testModules = splitTestModules(\n        weightTestModules(this._testModules),\n        split,\n        partitions,\n      );\n\n      this._testem.emit(\n        'testem:set-modules-queue',\n        this._testModules,\n        browserId,\n      );\n    } else {\n      this._testModules = splitTestModules(\n        this._testModules,\n        split,\n        partitions,\n      );\n\n      if (this._availableModules) {\n        await this.loadAvailableModules();\n        return;\n      }\n\n      /**\n       * Legacy support\n       */\n      this._testModules.forEach((moduleName) => {\n        super.require(moduleName);\n        super.unsee(moduleName);\n      });\n    }\n  }\n\n  /**\n   * availableModules are passed in from loadModules\n   * from loadEmberExam\n   * from start\n   */\n  async loadAvailableModules() {\n    if (this._availableModules) {\n  
    await Promise.all(\n        this._testModules.map(async (moduleName) => {\n          let loader = this._availableModules[moduleName];\n\n          /**\n           * If it's not a function, it's already loaded\n           */\n          if (typeof loader === 'function') {\n            await loader();\n          }\n        }),\n      );\n    }\n  }\n\n  /**\n   * Allow loading one module at a time.\n   *\n   * @method loadIndividualModule\n   * @param {string} moduleName\n   */\n  async loadIndividualModule(moduleName) {\n    if (moduleName === undefined) {\n      throw new Error(\n        'Failed to load a test module. `moduleName` is undefined in `loadIndividualModule`.',\n      );\n    }\n\n    if (this._availableModules) {\n      let loader = this._availableModules[moduleName];\n\n      /**\n       * If it's not a function, it's already loaded\n       */\n      if (typeof loader === 'function') {\n        await loader();\n      }\n\n      return;\n    }\n\n    super.require(moduleName);\n    super.unsee(moduleName);\n  }\n\n  /**\n   * setupModuleMetadataHandler() register QUnit callback to enable generating module metadata file.\n   *\n   * @method setupModuleMetadataHandler\n   */\n  setupModuleMetadataHandler() {\n    this._qunit.testDone((metadata) => {\n      if (typeof this._testem !== 'undefined' && this._testem !== null) {\n        // testem:test-done-metadata is sent to server to track test module details.\n        // metadata contains name, module, failed, passed, total, duration, skipped, and todo.\n        // https://api.qunitjs.com/callbacks/QUnit.testDone\n        this._testem.emit('testem:test-done-metadata', metadata);\n      }\n    });\n  }\n\n  /**\n   * setupLoadBalanceHandlers() registers QUnit callbacks needed for the load-balance option.\n   *\n   * @method setupLoadBalanceHandlers\n   */\n  setupLoadBalanceHandlers() {\n    // nextModuleAsyncIterator handles the async testem events\n    // it returns an element of {value: <moduleName>, 
done: boolean}\n    const nextModuleAsyncIterator = new AsyncIterator(this._testem, {\n      request: 'testem:next-module-request',\n      response: 'testem:next-module-response',\n      timeout: this._urlParams.get('asyncTimeout'),\n      browserId: this._urlParams.get('browser'),\n      emberExamExitOnError: this._urlParams.get('_emberExamExitOnError'),\n    });\n\n    const nextModuleHandler = () => {\n      // if there are already tests queued up, don't request next module\n      // this is possible if a test file has multiple qunit modules\n      if (this._qunit.config.queue.length > 0) {\n        return;\n      }\n\n      return nextModuleAsyncIterator\n        .next()\n        .then(async (response) => {\n          if (!response.done) {\n            const moduleName = response.value;\n            await this.loadIndividualModule(moduleName);\n\n            // if no tests were added, request the next module\n            if (this._qunit.config.queue.length === 0) {\n              return nextModuleHandler();\n            }\n          }\n        })\n        .catch((e) => {\n          if (\n            typeof e === 'object' &&\n            e !== null &&\n            typeof e.message === 'string'\n          ) {\n            e.message = `EmberExam: Failed to get next test module: ${e.message}`;\n          }\n          throw new Error(`EmberExam: Failed to get next test module: ${e}`);\n        });\n    };\n\n    // it registers qunit begin callback to ask for a next test module to execute when the test suite begins.\n    // By default ember-qunit adds `Ember.onerror` test to a qunit processing queue and once the test is complete it executes _qunit.moduleDone callback.\n    // However, when `setupEmberOnerrorValidation: false` is passed the test is disabled and _qunit.begin callback needs to request a next test module to run.\n    this._qunit.begin(() => {\n      return nextModuleHandler();\n    });\n\n    this._qunit.moduleDone(() => {\n      return 
nextModuleHandler();\n    });\n  }\n}\n"
  },
  {
    "path": "addon-test-support/-private/filter-test-modules.js",
    "content": "// A regular expression to help parsing a string to verify regex.\nconst MODULE_PATH_REGEXP = /^(!?)\\/(.*)\\/(i?)$/;\nconst TEST_PATH_REGEX = /\\/tests\\/(.*?)$/;\n\n/**\n * Return the matched test.\n * e.g. if an input is '!/weight/' it returns an array, ['!/weight/', '!', 'weight', ''];\n *\n * @function getRegexFilter\n * @param {*} modulePath\n */\nfunction getRegexFilter(modulePath) {\n  return MODULE_PATH_REGEXP.exec(modulePath);\n}\n\n/**\n * Determine if a given module path is matched with module filter with wildcard.\n * e.g. A given moduleFilter, /tests/integration/*, matches with /tests/integration/foo and /tests/integration/bar\n *\n * @function wildcardFilter\n * @param {*} module\n * @param {*} moduleFilter\n */\nfunction wildcardFilter(module, moduleFilter) {\n  // Generate a regular expression to handle wildcard from path filter\n  const moduleFilterRule = [\n    '^.*',\n    moduleFilter.split('*').join('.*'),\n    '$',\n  ].join('');\n  return new RegExp(moduleFilterRule).test(module);\n}\n\n/**\n * Return a list of test modules that contain a given module path string.\n *\n * @function stringFilter\n * @param {Array<string>} modules\n * @param {string} moduleFilter\n */\nfunction stringFilter(modules, moduleFilter) {\n  return modules.filter(\n    (module) =>\n      module.includes(moduleFilter) || wildcardFilter(module, moduleFilter),\n  );\n}\n\n/**\n * Return a list of test modules that matches with a given regular expression.\n *\n * @function regexFilter\n * @param {Array<string>} modules\n * @param {Array<string>} modulePathRegexFilter\n */\nfunction regexFilter(modules, modulePathRegexFilter) {\n  const re = new RegExp(modulePathRegexFilter[2], modulePathRegexFilter[3]);\n  const exclude = modulePathRegexFilter[1];\n\n  return modules.filter(\n    (module) => (!exclude && re.test(module)) || (exclude && !re.test(module)),\n  );\n}\n\n/**\n * Return a module path that's mapped by a given test file path.\n *\n * @function 
convertFilePathToModulePath\n * @param {*} filePath\n */\nfunction convertFilePathToModulePath(filePath) {\n  const filePathWithNoExtension = filePath.replace(/\\.[^/.]+$/, '');\n  const testFilePathMatch = TEST_PATH_REGEX.exec(filePathWithNoExtension);\n  if (typeof filePath !== 'undefined' && testFilePathMatch !== null) {\n    return testFilePathMatch[0];\n  }\n\n  return filePathWithNoExtension;\n}\n\n/**\n * Returns a list of test modules that match with the given module path filter or test file path.\n *\n * @function filterTestModules\n * @param {Array<string>} modules\n * @param {string} modulePath\n * @param {string} filePath\n */\nfunction filterTestModules(modules, modulePath, filePath) {\n  // Generates an array with module filter value separated by comma (,).\n  const moduleFilters = (filePath || modulePath)\n    .split(',')\n    .map((value) => value.trim());\n\n  const filteredTestModules = moduleFilters.reduce((result, moduleFilter) => {\n    const modulePath = convertFilePathToModulePath(moduleFilter);\n    const modulePathRegex = getRegexFilter(modulePath);\n\n    if (modulePathRegex) {\n      return result.concat(\n        regexFilter(modules, modulePathRegex).filter(\n          (module) => result.indexOf(module) === -1,\n        ),\n      );\n    } else {\n      return result.concat(\n        stringFilter(modules, modulePath).filter(\n          (module) => result.indexOf(module) === -1,\n        ),\n      );\n    }\n  }, []);\n\n  if (filteredTestModules.length === 0) {\n    throw new Error(\n      `No tests matched with the filter: ${modulePath || filePath}.`,\n    );\n  }\n  return filteredTestModules;\n}\n\nexport { convertFilePathToModulePath, filterTestModules };\n"
  },
  {
    "path": "addon-test-support/-private/get-url-params.js",
    "content": "function decodeQueryParam(param) {\n  return decodeURIComponent(param.replace(/\\+/g, '%20'));\n}\n\n/**\n * Parses the url and return an object containing a param's key and value\n *\n * @export\n * @function getUrlParams\n * @return {Object} urlParams\n */\nexport default function getUrlParams() {\n  const urlParams = new Map();\n  const params = location.search.slice(1).split('&');\n\n  for (let i = 0; i < params.length; i++) {\n    if (params[i]) {\n      const param = params[i].split('=');\n      const name = decodeQueryParam(param[0]);\n\n      // Allow just a key to turn on a flag, e.g., test.html?noglobals\n      const value =\n        param.length === 1 || decodeQueryParam(param.slice(1).join('='));\n      if (urlParams.has(name)) {\n        urlParams.set(name, [].concat(urlParams.get(name), value));\n      } else {\n        urlParams.set(name, value);\n      }\n    }\n  }\n\n  return urlParams;\n}\n"
  },
  {
    "path": "addon-test-support/-private/patch-testem-output.js",
    "content": "/* globals Testem */\n\n/**\n * Returns a modified test name including browser or partition information\n *\n * @function updateTestName\n * @param {Map} urlParams\n * @param {string} testName\n * @return {string} testName\n */\nexport function updateTestName(urlParams, testName) {\n  const split = urlParams.get('split');\n  const loadBalance = urlParams.get('loadBalance');\n\n  const partition = urlParams.get('partition') || 1;\n  const browser = urlParams.get('browser') || 1;\n\n  const preserveTestName = !!urlParams.get('preserveTestName');\n\n  if (preserveTestName) {\n    return testName;\n  } else if (split && loadBalance) {\n    testName = `Exam Partition ${partition} - Browser Id ${browser} - ${testName}`;\n  } else if (split) {\n    testName = `Exam Partition ${partition} - ${testName}`;\n  } else if (loadBalance) {\n    testName = `Browser Id ${browser} - ${testName}`;\n  }\n\n  return testName;\n}\n\n/**\n * Setup testem test-result event to update the test name when a test completes\n *\n * @function patchTestemOutput\n * @param {Map} urlParams\n */\nexport function patchTestemOutput(urlParams) {\n  Testem.on('test-result', (test) => {\n    test.name = updateTestName(urlParams, test.name);\n  });\n}\n"
  },
  {
    "path": "addon-test-support/-private/split-test-modules.js",
    "content": "function createGroups(num) {\n  const groups = new Array(num);\n\n  for (let i = 0; i < num; i++) {\n    groups[i] = [];\n  }\n\n  return groups;\n}\n\nfunction splitIntoGroups(arr, numGroups) {\n  const groups = createGroups(numGroups);\n\n  for (let i = 0; i < arr.length; i++) {\n    groups[i % numGroups].push(arr[i]);\n  }\n\n  return groups;\n}\n\n/**\n * Splits the list of modules into unique subset of modules\n * return the subset indexed by the partition\n *\n * @export\n * @function splitTestModules\n * @param {Array<string>} modules\n * @param {number} split\n * @param {number} partitions\n * @return {Array<string>} tests\n */\nexport default function splitTestModules(modules, split, partitions) {\n  if (split < 1) {\n    throw new Error('You must specify a split greater than 0');\n  }\n\n  const testGroups = splitIntoGroups(modules, split);\n  const tests = [];\n\n  for (let i = 0; i < partitions.length; i++) {\n    const partition = parseInt(partitions[i], 10);\n    if (isNaN(partition)) {\n      throw new Error(\n        \"You must specify numbers for partition (you specified '\" +\n          partitions +\n          \"')\",\n      );\n    }\n\n    if (split < partition) {\n      throw new Error(\n        'You must specify partitions numbered less than or equal to your split value of ' +\n          split,\n      );\n    } else if (partition < 1) {\n      throw new Error('You must specify partitions numbered greater than 0');\n    }\n\n    const group = partition - 1;\n    tests.push(...testGroups[group]);\n  }\n\n  return tests;\n}\n"
  },
  {
    "path": "addon-test-support/-private/weight-test-modules.js",
    "content": "const TEST_TYPE_WEIGHT = {\n  unit: 10,\n  integration: 20,\n  acceptance: 150,\n};\nconst WEIGHT_REGEX = /\\/(unit|integration|acceptance)\\//;\nconst DEFAULT_WEIGHT = 50;\n\n/**\n * Return the weight for a given module name, a file path to the module\n * Ember tests consist of Acceptance, Integration, and Unit tests. In general, acceptance takes\n * longest time to execute, followed by integration and unit.\n * The weight assigned to a module corresponds to its test type execution speed, with slowest being the highest in weight.\n * If the test type is not identifiable from the modulePath, weight default to 50 (ordered after acceptance, but before integration)\n *\n * @function getWeight\n * @param {string} modulePath File path to a module\n */\nfunction getWeight(modulePath) {\n  const [, key] = WEIGHT_REGEX.exec(modulePath) || [];\n  if (typeof TEST_TYPE_WEIGHT[key] === 'number') {\n    return TEST_TYPE_WEIGHT[key];\n  } else {\n    return DEFAULT_WEIGHT;\n  }\n}\n\n/**\n * Returns the list of modules sorted by its weight\n *\n * @export\n * @function weightTestModules\n * @param {Array<string>} modules\n * @return {Array<string>}\n */\nexport default function weightTestModules(modules) {\n  const groups = new Map();\n\n  modules.forEach((module) => {\n    const moduleWeight = getWeight(module);\n    let moduleWeightGroup = groups.get(moduleWeight);\n\n    if (Array.isArray(moduleWeightGroup)) {\n      moduleWeightGroup.push(module);\n    } else {\n      moduleWeightGroup = [module];\n    }\n\n    groups.set(moduleWeight, moduleWeightGroup);\n  });\n\n  // return modules sorted by weight and alphabetically within its weighted groups\n  return Array.from(groups.keys())\n    .sort((a, b) => b - a)\n    .reduce((accumulatedArray, weight) => {\n      const sortedModuleArr = groups.get(weight).sort();\n      return accumulatedArray.concat(sortedModuleArr);\n    }, []);\n}\n"
  },
  {
    "path": "addon-test-support/index.d.ts",
    "content": "import { QUnitStartOptions } from 'ember-qunit';\n\nexport type EmberExamStartOptions = Omit<QUnitStartOptions, 'loadTests'> & {\n  availableModules: Record<string, unknown>;\n};\n\nexport function start(options: EmberExamStartOptions): Promise<void>;\n"
  },
  {
    "path": "addon-test-support/index.js",
    "content": "export { default as start } from './start';\n"
  },
  {
    "path": "addon-test-support/load.js",
    "content": "import EmberExamTestLoader from './-private/ember-exam-test-loader';\nimport { patchTestemOutput } from './-private/patch-testem-output';\n\nlet loaded = false;\n\n/**\n * Setup EmberExamTestLoader to enable ember exam functionalities\n *\n * @function loadEmberExam\n * @return {*} testLoader\n */\nexport default function loadEmberExam() {\n  if (loaded) {\n    console.warn('Attempted to load Ember Exam more than once.');\n    return;\n  }\n\n  loaded = true;\n\n  const testLoader = new EmberExamTestLoader(window.Testem);\n\n  if (window.Testem) {\n    patchTestemOutput(testLoader.urlParams);\n  }\n\n  return testLoader;\n}\n"
  },
  {
    "path": "addon-test-support/start.js",
    "content": "import loadEmberExam from './load';\nimport { start as qunitStart } from 'ember-qunit';\n\n/**\n * Equivalent to ember-qunit's loadTest() except this does not create a new TestLoader instance\n *\n * @function loadTests\n * @param {*} testLoader\n * @param {*} loaderOptions\n */\nasync function loadTests(testLoader, loaderOptions = {}) {\n  if (testLoader === undefined) {\n    throw new Error(\n      'A testLoader instance has not been created. You must call `loadEmberExam()` before calling `loadTest()`.',\n    );\n  }\n\n  await testLoader.loadModules(loaderOptions);\n}\n\n/**\n * Ember-exam's own start function to set up EmberExamTestLoader, load tests and calls start() from\n * ember-qunit\n *\n * @function start\n * @param {*} qunitOptions\n */\nexport default async function start(qunitOptions = {}) {\n  const { availableModules, ...modifiedOptions } =\n    qunitOptions || Object.create(null);\n\n  modifiedOptions.loadTests = false;\n\n  const testLoader = loadEmberExam();\n  await loadTests(testLoader, { availableModules });\n  qunitStart(modifiedOptions);\n}\n"
  },
  {
    "path": "docs-app/.gitignore",
    "content": "dist/\nnode_modules/\n.vitepress/dist\n.vitepress/cache\n"
  },
  {
    "path": "docs-app/.vitepress/config.mts",
    "content": "import { defineConfig } from 'vitepress'\n\n// https://vitepress.dev/reference/site-config\nexport default defineConfig({\n  title: \"ember-exam\",\n  description: \"Run your tests with randomization, splitting, and parallelization for beautiful tests.\",\n  base: '/ember-exam/',\n  markdown: {\n    // theme: {\n    //   ...dark,\n    //   settings: [\n    //     {\n    //       scope: 'comment',\n    //       settings: {\n    //         // 'foreground': 'rgb(200, 200, 200)'\n    //       }\n    //     },\n    //   ]\n    // },\n  },\n  themeConfig: {\n    // https://vitepress.dev/reference/default-theme-config\n    nav: [\n      { text: 'Home', link: '/' },\n      // { text: 'Examples', link: '/markdown-examples' }\n    ],\n\n    sidebar: [\n      {\n        text: 'Options',\n        items: [\n          { text: 'Randomization', link: '/randomization' },\n          { text: 'Randomization Iterator', link: '/randomization-iterator' },\n          { text: 'Generating Module Metadata For Test Execution', link: '/module-metadata' },\n          { text: 'Splitting', link: '/splitting' },\n          { text: 'Split Test Parallelization', link: '/split-parallel' },\n          { text: 'Filtering', link: '/filtering' },\n          { text: 'Test Load Balancing', link: '/load-balancing' },\n        ]\n      },\n      {\n        text: 'Advanced Configuration',\n        items: [\n          { text: 'Ember Try & CI Integration', link: '/ember-try-and-ci' },\n          { text: 'Test Suite Segmentation', link: '/test-suite-segmentation' },\n        ]\n      }\n    ],\n\n    socialLinks: [\n      { icon: 'github', link: 'https://github.com/ember-cli/ember-exam' }\n    ]\n  }\n})\n"
  },
  {
    "path": "docs-app/.vitepress/theme/index.ts",
    "content": "// https://vitepress.dev/guide/custom-theme\nimport { h } from 'vue'\nimport type { Theme } from 'vitepress'\nimport DefaultTheme from 'vitepress/theme'\nimport './style.css'\n\nexport default {\n  extends: DefaultTheme,\n  Layout: () => {\n    return h(DefaultTheme.Layout, null, {\n      // https://vitepress.dev/guide/extending-default-theme#layout-slots\n    })\n  },\n  enhanceApp({ app, router, siteData }) {\n    // ...\n  }\n} satisfies Theme\n"
  },
  {
    "path": "docs-app/.vitepress/theme/style.css",
    "content": "/**\n * Customize default theme styling by overriding CSS variables:\n * https://github.com/vuejs/vitepress/blob/main/src/client/theme-default/styles/vars.css\n */\n\n/**\n * Colors\n *\n * Each colors have exact same color scale system with 3 levels of solid\n * colors with different brightness, and 1 soft color.\n *\n * - `XXX-1`: The most solid color used mainly for colored text. It must\n *   satisfy the contrast ratio against when used on top of `XXX-soft`.\n *\n * - `XXX-2`: The color used mainly for hover state of the button.\n *\n * - `XXX-3`: The color for solid background, such as bg color of the button.\n *   It must satisfy the contrast ratio with pure white (#ffffff) text on\n *   top of it.\n *\n * - `XXX-soft`: The color used for subtle background such as custom container\n *   or badges. It must satisfy the contrast ratio when putting `XXX-1` colors\n *   on top of it.\n *\n *   The soft color must be semi transparent alpha channel. This is crucial\n *   because it allows adding multiple \"soft\" colors on top of each other\n *   to create a accent, such as when having inline code block inside\n *   custom containers.\n *\n * - `default`: The color used purely for subtle indication without any\n *   special meanings attached to it such as bg color for menu hover state.\n *\n * - `brand`: Used for primary brand colors, such as link text, button with\n *   brand theme, etc.\n *\n * - `tip`: Used to indicate useful information. The default theme uses the\n *   brand color for this by default.\n *\n * - `warning`: Used to indicate warning to the users. Used in custom\n *   container, badges, etc.\n *\n * - `danger`: Used to show error, or dangerous message to the users. 
Used\n *   in custom container, badges, etc.\n * -------------------------------------------------------------------------- */\n\n:root {\n  --vp-c-default-1: var(--vp-c-gray-1);\n  --vp-c-default-2: var(--vp-c-gray-2);\n  --vp-c-default-3: var(--vp-c-gray-3);\n  --vp-c-default-soft: var(--vp-c-gray-soft);\n\n  --vp-c-brand-1: var(--vp-c-indigo-1);\n  --vp-c-brand-2: var(--vp-c-indigo-2);\n  --vp-c-brand-3: var(--vp-c-indigo-3);\n  --vp-c-brand-soft: var(--vp-c-indigo-soft);\n\n  --vp-c-tip-1: var(--vp-c-brand-1);\n  --vp-c-tip-2: var(--vp-c-brand-2);\n  --vp-c-tip-3: var(--vp-c-brand-3);\n  --vp-c-tip-soft: var(--vp-c-brand-soft);\n\n  --vp-c-warning-1: var(--vp-c-yellow-1);\n  --vp-c-warning-2: var(--vp-c-yellow-2);\n  --vp-c-warning-3: var(--vp-c-yellow-3);\n  --vp-c-warning-soft: var(--vp-c-yellow-soft);\n\n  --vp-c-danger-1: var(--vp-c-red-1);\n  --vp-c-danger-2: var(--vp-c-red-2);\n  --vp-c-danger-3: var(--vp-c-red-3);\n  --vp-c-danger-soft: var(--vp-c-red-soft);\n}\n\n/**\n * Component: Button\n * -------------------------------------------------------------------------- */\n\n:root {\n  --vp-button-brand-border: transparent;\n  --vp-button-brand-text: var(--vp-c-white);\n  --vp-button-brand-bg: var(--vp-c-brand-3);\n  --vp-button-brand-hover-border: transparent;\n  --vp-button-brand-hover-text: var(--vp-c-white);\n  --vp-button-brand-hover-bg: var(--vp-c-brand-2);\n  --vp-button-brand-active-border: transparent;\n  --vp-button-brand-active-text: var(--vp-c-white);\n  --vp-button-brand-active-bg: var(--vp-c-brand-1);\n}\n\n/**\n * Component: Home\n * -------------------------------------------------------------------------- */\n\n:root {\n  --vp-home-hero-name-color: transparent;\n  --vp-home-hero-name-background: -webkit-linear-gradient(\n    120deg,\n    #bd34fe 30%,\n    #41d1ff\n  );\n\n  --vp-home-hero-image-background-image: linear-gradient(\n    -45deg,\n    #bd34fe 50%,\n    #47caff 50%\n  );\n  --vp-home-hero-image-filter: blur(44px);\n}\n\n@media 
(min-width: 640px) {\n  :root {\n    --vp-home-hero-image-filter: blur(56px);\n  }\n}\n\n@media (min-width: 960px) {\n  :root {\n    --vp-home-hero-image-filter: blur(68px);\n  }\n}\n\n/**\n * Component: Custom Block\n * -------------------------------------------------------------------------- */\n\n:root {\n  --vp-custom-block-tip-border: transparent;\n  --vp-custom-block-tip-text: var(--vp-c-text-1);\n  --vp-custom-block-tip-bg: var(--vp-c-brand-soft);\n  --vp-custom-block-tip-code-bg: var(--vp-c-brand-soft);\n}\n\n/**\n * Component: Algolia\n * -------------------------------------------------------------------------- */\n\n.DocSearch {\n  --docsearch-primary-color: var(--vp-c-brand-1) !important;\n}\n\n\n\n.badges {\n  > p {\n    display: flex;\n    gap: 1rem;\n    justify-content: center;\n    align-items: center;\n  }\n  img, a {\n    display: inline-flex;\n  }\n\n}\n\n/**\n* Contrast fixes\n*/\nhtml {\n  --vp-c-text-2: rgb(40, 40, 40);\n}\nhtml.dark {\n  --vp-c-text-2: rgb(200, 200, 200);\n}\n\nhtml [class*='language-'] > span.lang {\n  --vp-code-lang-color: rgb(40,40,40);\n}\nhtml.dark [class*='language-'] > span.lang {\n  --vp-code-lang-color: rgb(200,200,200);\n}\n"
  },
  {
    "path": "docs-app/ember-try-and-ci.md",
    "content": "### Ember Try & CI Integration\n\nIntegrating ember-exam with [ember-try](https://github.com/ember-cli/ember-try) is remarkably easy. Define a [`command` in your `ember-try.js` config](https://github.com/ember-cli/ember-try#configuration-files) that leverages the `exam` command:\n\n```js\n// config/ember-try.js\nmodule.exports = {\n  command: 'ember exam --split 3 --parallel',\n  // ...\n};\n```\n\nUsing [environmental variables](https://nodejs.org/api/process.html#process_process_env) gives you flexibility in how you run your tests. For instance, you could distribute your tests across processes instead of parallelizing them by specifying a `PARTITION` variable in your process environment and then consuming it like so:\n\n```js\nmodule.exports = {\n  command: 'ember exam --split 20 --partition ' + process.env.PARTITION,\n  // ...\n};\n```\n\nIf you are working with [Travis CI](https://travis-ci.org/) then you can also easily set up seeded-random runs based on PR numbers. Similar to the following:\n\n```js\nconst command = ['ember', 'exam', '--random'];\nconst pr = process.env.TRAVIS_PULL_REQUEST;\n\nif (pr) {\n  command.push(pr);\n}\n\nmodule.exports = {\n  command: command.join(' '),\n  // ...\n};\n```\n\nYou can refer to [Travis' default environment variables](https://docs.travis-ci.com/user/environment-variables/#Default-Environment-Variables) to see what else you could possibly leverage for your test setup.\n"
  },
  {
    "path": "docs-app/filtering.md",
    "content": "### Filtering\n\nEmber Exam provides options to filter test suites by two types - module path and test file path.\n\n```bash\n$ ember exam --module-path=<module-path>\n```\n\n#### For Vite Apps\n\nThe `file-path` option allows you to filter modules by the given relative path that is generated from `import.meta.glob(...)` in your `tests/index.html`.\n\n```bash\n# This will run tests that are defined in `/my-application/tests/unit/my-test.js`\n$ ember exam --file-path='/my-application/tests/unit/my-test.js'\n\n# This will run all test files that are under `/my-application/tests/unit/`\n$ ember exam --file-path='/my-application/tests/unit/*.js'\n```\n\n\n#### For non-Vite Apps\n\n\nThe `module-path` option allows you to filter module paths by a given value. Module paths are mapped by test files and they are generated during `ember build`. After the build, `tests.js` file is created and it resides under [build-directory]/assets. \n\nThe file is combined of all tests in an application and it has a form of `define(\"<module-path>\", others..`.\n\nThe value for `module-path` can have either string or regular expression, for instance:\n\n```bash\n# When module path value is string. This will run all modules which match with the passed value\n$ ember exam --module-path='dummy/tests/helpers/module-for-acceptance'\n\n# When module path value is regex. This will run all modules which have `dummy` in it\n$ ember exam --module-path='!/dummy/'\n```\n\nThe `file-path` option is to filter tests by *test file path*. The test file path is a location of the test file in a file system. 
You can specify `file-path` to a location of specific test file path or you can use wildcards in paths to target multiple test files.\n\n```bash\n# This will run tests that are defined in `/my-application/tests/unit/my-test.js`\n$ ember exam --file-path='/my-application/tests/unit/my-test.js'\n\n# This will run all test files that are under `/my-application/tests/unit/`\n$ ember exam --file-path='/my-application/tests/unit/*.js'\n```\n"
  },
  {
    "path": "docs-app/index.md",
    "content": "---\n# https://vitepress.dev/reference/default-theme-home-page\nlayout: home\n\nhero:\n  name: \"ember-exam\"\n  # text: \"Run your tests with randomization, splitting, and parallelization for beautiful tests.\"\n  tagline: \"Run your tests with randomization, splitting, and parallelization for beautiful tests.\"\n  actions:\n    - theme: brand\n      text: Quickstart \n      link: /quickstart\n    # - theme: alt\n    #   text: API Examples\n    #   link: /api-examples\n\nfeatures:\n  - title: Partitioning \n    details: Specify the number of parallel browser instances to use to speed up your test suite. \n  - title: Load Balancing \n    details: Balance tests to maximize the effectivess of parallel browsers that would otherwise completely quickly due to happenstance of being given quickly running tests.\n  - title: Randomization \n    details: Find and eliminate brittle tests by changing the order of tests within the test suite. \n  - title: Replay \n    details: Record and replay test execution order for reliably reproducing potentially flaky behaviors. \n---\n\n\n<span class=\"badges\">\n\n![Build Status](https://github.com/ember-cli/ember-exam/actions/workflows/ci.yml/badge.svg?event=push)\n[![NPM Version](https://badge.fury.io/js/ember-exam.svg)][npm]\n[![Ember Observer Score](https://emberobserver.com/badges/ember-exam.svg)][score]\n\n</span>\n\n[npm]: https://npmjs.com/package/ember-exam\n[score]: https://emberobserver.com/addons/ember-exam\n\nEmber Exam is an addon to allow you more control over how you run your tests when used in conjunction with [ember-qunit](https://github.com/emberjs/ember-qunit). It provides the ability to randomize, split, parallelize, and load-balance your test suite by adding a more robust CLI command.\n\nIt started as a way to help reduce flaky tests and encourage healthy test driven development. 
\n\n[![Introduction to Ember Exam](https://cloud.githubusercontent.com/assets/2922250/22800360/157ad67c-eed7-11e6-8d33-d2c59238c7f1.png)](https://embermap.com/video/ember-exam)\n\n\n"
  },
  {
    "path": "docs-app/load-balancing.md",
    "content": "# Test Load Balancing\n\n```bash\nember exam --parallel=<num> --load-balance\n```\n\nThe `load-balance` option allows you to load balance test files against multiple browsers. It will order the test files by test types, e.g. acceptance | integration | unit, and load balance the ordered test files between the browsers dynamically rather than statically.\n**Note:** parallel must be used along with load-balance to specify a number of browser(s)\n\nThe `load-balance` option was added to version 1.1 to address execution performance when running against a large test suite.\n\nWeb browsers and the testem server communicate via promise in order to send and receive a test file. The promise timeout value is set to be 2 seconds, and the timeout can be customized by adding asyncTimeout=[timeout] as a querystring param in the test URL or adding to a testem config.\nFor example, if you specify `load-balance` and `parallel` equals 3, then three browser instances will be created and the output will look something like:\n\n```bash\n# ember exam --parallel=3 --load-balance\nok 1 Chrome 66.0 - Browser Id 1 - some test\nok 2 Chrome 66.0 - Browser Id 2 - some another test\nok 3 Chrome 66.0 - Browser Id 3 - some the other test\n```\n\nYou can also specify the `split` and `partition` options with `load-balance` to load a portion of test modules on multiple CI containers.\n\n```bash\nember exam --split=<num> --partition=<num> --parallel=<num> --load-balance\n```\n\nThis command will split test files and load-balance tests from the specified partition across the browsers. For example `ember exam --split=2 -partition=1 --parallel=3 --load-balance`, the complete list of test files are split into two halves. With the first half of the list load balanced against three browsers. 
The output will look something like below:\n\n```bash\n# ember exam --split=2 --partition=1 --parallel=3 --load-balance\nok 1 Chrome 66.0 - Exam Partition 1 - browser Id 1 - some test\nok 2 Chrome 66.0 - Exam Partition 1 - browser Id 2 - another test\nok 3 Chrome 66.0 - Exam Partition 1 - browser Id 3 - some the other test\n```\n\n**Important information on Load Balancing**\n\n1. The `--load-balance` option is currently only supported in CI mode and for that reason no-launch cannot be used with load-balance.\n2. You must be using `ember-cli` version 3.2.0 or greater for load balancing and test failure reproduction features to work properly.\n3. You must be using `ember-qunit` version 4.1.1 or greater for this feature to work properly.\n4. You must be using `qunit` version 2.13.0 or greater for this feature to work properly.\n\n## Test Failure Reproduction\n\nDue to the dynamic nature of the load-balance option, test file execution order can vary between runs. In order to reproduce a past test execution, the execution must be recorded via passing --write-execution-file or --wef, which allows generating a JSON file that enables rerunning the past test execution. The option is only allowed when load-balance is passed.\n\n```bash\n# The command will load in test balanced mode with <num> of browser(s). 
After the test suite execution, it will generate a test-execution json file.\nember exam --parallel=<num> --load-balance --wef\nember exam --parallel=<num> --load-balance --write-execution-file\n```\n\nThe file is stored in the root directory and the naming structure is `test-execution-<timestamp>.json`.\nTo replay the test execution for particular browser(s), do the following:\n\n```bash\n# The command will read a test execution file specified for `replay-execution` and execute a browser Id(s) from `replay-browser`\nember exam --replay-execution=[string] --replay-browser=[num]\n```\n\n`replay-execution` allows you to specify a path to the json file to run execution against and `replay-browser` is to specify browser ID(s) to execute.\n\n```bash\n# The command will read test-execution-000000.json and load the list of modules mapped to browserId 1\nember exam --replay-execution=test-execution-000000.json --replay-browser=1\n```\n\nThe above command will read `test-execution-000000.json` and load the list of modules which is mapped by browser ID #1.\n\n`replay-browser` can be an array of browser IDs. 
For instance `--replay-browser=1,2` will start two browsers and execute a list of modules which were previously run by browsers #1 and #2.\n\n```bash\n# The command will read test-execution-000000.json and load the list of modules mapped to browserId 1 and 2\nember exam --replay-execution=test-execution-000000.json --replay-browser=1,2\n```\n\nWhen `replay-browser` value is not specified it will execute browserId(s) read from `failedBrowser` in the test execution file.\n\n```bash\n# The command will read test-execution-000000.json and load the list of modules mapped to browserIds from failedBrowser in the json file.\nember exam --replay-execution=test-execution-000000.json\n```\n\nWhen `replay-browser` value is not specified and there is no value for `failedBrowser` in the json file it will rerun all list of modules.\n\n```bash\n# The command will read test-execution-000000.json and load the list of modules mapped to all browserIds when failedBrowser is none in the json file\nember exam --replay-execution=test-execution-000000.json\n```\n\n**Important information on `--replay-execution` and `--replay-browser`**\n\n1. You must be using `ember-cli` version 3.2.0 or greater for load-balance and test failure reproduction features to work properly.\n2. You must be using `ember-qunit` version 4.1.1 or greater for this feature to work properly.\n3. You must be using `qunit` version 2.8.0 or greater for this feature to work properly.\n"
  },
  {
    "path": "docs-app/module-metadata.md",
    "content": "### Generating Module Metadata File For Test Execution\n\n```bash\n$ ember exam --write-module-metadata-file\n$ ember exam --wmmf\n```\n\nThe `--write-module-metadata-file`, `wmmf` as an alias, allows you to generate a module metadata file after a test run. The module metadata file provides information about the test modules executed.\n\nIt creates a json file, `module-metadata-<timestamp>.json`, which contains an array of elements representing metadata of modules executed by sorted by ascending order:\n```json\n[\n  {\n    \"moduleName\": \"Module-name\",\n    \"total\": \"Total number of tests in the module\",\n    \"passed\": \"A number of passed tests in the module\",\n    \"failed\": \"A number of failed tests in the module\",\n    \"skipped\": \"A number of skipped tests in the module\",\n    \"duration\": \"(ms) duration to execute all tests within the module\",\n    \"failedTests\": \"A list of failed tests\"\n  }\n]\n```\n\nand it looks something like below:\n```json\n[\n  {\n    \"moduleName\": \"Slowest-module\",\n    \"total\": 12,\n    \"passed\": 9,\n    \"failed\": 1,\n    \"skipped\": 2,\n    \"duration\": 153,\n    \"failedTests\": [\"failed-test-1\"]\n  },\n  {\n    \"moduleName\": \"Fastest-module\",\n    \"total\": 2,\n    \"passed\": 1,\n    \"failed\": 0,\n    \"skipped\": 0,\n    \"duration\": 123,\n    \"failedTests\": []\n  }\n]\n```\n\n"
  },
  {
    "path": "docs-app/package.json",
    "content": "{\n  \"name\": \"docs\",\n  \"private\": true,\n  \"version\": \"1.0.0\",\n  \"scripts\": {\n    \"docs:dev\": \"vitepress dev .\",\n    \"docs:build\": \"vitepress build .\",\n    \"docs:preview\": \"vitepress preview .\"\n  },\n  \"keywords\": [],\n  \"author\": \"\",\n  \"license\": \"MIT\",\n  \"packageManager\": \"pnpm@10.33.0\",\n  \"devDependencies\": {\n    \"@algolia/client-search\": \"5.46.4\",\n    \"search-insights\": \"2.17.3\",\n    \"typescript\": \"5.9.3\",\n    \"vite\": \"7.3.2\",\n    \"vitepress\": \"1.6.4\",\n    \"vitepress-plugin-llms\": \"1.10.0\",\n    \"vue\": \"3.5.31\"\n  },\n  \"dependencies\": {\n    \"shiki\": \"^3.8.1\"\n  }\n}\n"
  },
  {
    "path": "docs-app/preserve-test-name.md",
    "content": "# Preserve Test Name\n\nWhen using `--split` and/or `--load-balance` the output will look something like:\n\n```bash\n# ember exam --split=2 --partition=1 --parallel=3 --load-balance\nok 1 Chrome 66.0 - Exam Partition 1 - browser Id 1 - some test\nok 2 Chrome 66.0 - Exam Partition 1 - browser Id 2 - another test\nok 3 Chrome 66.0 - Exam Partition 1 - browser Id 3 - some the other test\n```\nHowever, if you change the amount of parallelization, or randomize accross partitions, the output will change for the same test, which may be an issue if you are tracking test insights over time. \n\n```bash\n# ember exam --split=2 --partition=1 --parallel=2 --load-balance\nok 1 Chrome 66.0 - Exam Partition 1 - browser Id 2 - some test\nok 2 Chrome 66.0 - Exam Partition 1 - browser Id 1 - another test\nok 3 Chrome 66.0 - Exam Partition 1 - browser Id 2 - some the other test\n```\nYou can add `--preserve-test-name` to remove the dynamic segments of the output (partition and browser) to ensure the output test names are always the same.\n\n```bash\n# ember exam --split=2 --partition=1 --parallel=3 --load-balance --preserve-test-name\nok 1 Chrome 66.0 - some test\nok 2 Chrome 66.0 - another test\nok 3 Chrome 66.0 - some the other test\n```"
  },
  {
    "path": "docs-app/quickstart.md",
    "content": "# Quickstart\n\n## Installation\n\nInstallation is as easy as running:\n\n```bash\nnpm add --save-dev ember-exam\n```\n\n## Usage\n\nUsing Ember Exam is fairly straightforward as it extends directly from the default Ember-CLI `test` command. So, by default, it will work exactly the same as `ember test`.\n\n```bash\nember exam\nember exam --filter='acceptance'\nember exam --server\nember exam --load-balance --parallel=1\n```\n\nA value to an option can be passed with either `=` or a space.\n\n```bash\n# A value of filter is acceptance\nember exam --filter 'acceptance'\n\n# A value of parallel is 2\nember exam --load-balance --parallel=2 --server --no-launch\n\n# If a `=` is not used to pass a value to an option that requires a value, it will take anything passed after a space as it's value\n# In this instance, the value of parallel is --server\nember exam --load-balance --parallel --server --no-launch\n```\n\nThe idea is that you can replace `ember test` with `ember exam` and never look back.\n\nTo get the unique features of Ember Exam (described in-depth below), you will need to **replace** the use of `start()` from `ember-qunit` in `test-helper.js` with `start()` from `ember-exam`:\n\n\n## Setup\n\n### With Vite\n\n\nUpdate your test-helper.js or test-helper.ts, to have add the ember-exam `start` function:\n```diff\n  // ...\n  import { setApplication } from '@ember/test-helpers';\n  import { setup } from 'qunit-dom';\n- import { start as qunitStart, setupEmberOnerrorValidation } from 'ember-qunit';\n+ import { setupEmberOnerrorValidation } from 'ember-qunit';\n+ import { start as startEmberExam } from 'ember-exam/test-support';\n\n- export function start() {\n+ export async function start({ availableModules }) {\n    setApplication(Application.create(config.APP));\n\n    setup(QUnit.assert);\n    setupEmberOnerrorValidation();\n\n-   qunitStart();\n+   // Options passed to `start` will be passed-through to ember-qunit\n+   await startEmberExam({ 
availableModules });\n  }\n```\n\nThen, update your tests/index.html to pass availableModules to start:\n```html\n<script type=\"module\">\n  import { start } from './test-helper.js';\n\n  const availableModules = {\n    ...import.meta.glob('./application/**/*-test.{js,ts,gjs,gts}'),\n    ...import.meta.glob('./rendering/**/*-test.{js,ts,gjs,gts}'),\n    ...import.meta.glob('./unit/**/*-test.{js,ts,gjs,gts}'),\n  };\n\n\tstart({ availableModules });\n</script>\n```\n\n We need to tell vite to build the app before telling ember/exam to run tests on that output.\n\nTesting development:\n```bash \nNODE_ENV=development vite build --mode development\nember exam --path dist --config-file ./testem.cjs\n```\n\nTesting production:\n```bash\nvite build --mode test\nember exam --path dist --config-file ./testem.cjs\n```\n\n> [!NOTE]\n> Specifying the `--path` is important because otherwise ember-cli will try to build your vite app, and it will error. \n\n> [!NOTE]\n> Specifying the `--config-path` is important because ember-cli (what backs ember-exam) doesn't know about cjs files. \n\n\n### broccoli / ember-cli \n\n\nTo get the unique features of Ember Exam (described in-depth below), you will need to **replace** the use of `start()` from `ember-qunit` in `test-helper.js` with `start()` from `ember-exam`:\n\n```js\n// test-helper.js\n- import { start, setupEmberOnerrorValidation } from 'ember-qunit';\n+ import { setupEmberOnerrorValidation } from 'ember-qunit';\n+ import { start } from 'ember-exam/test-support';\n\n// Options passed to `start` will be passed-through to ember-qunit\nstart();\n```\n\n### Version < `3.0.0`\n\nPrior to `2.1.0`, Ember Exam must be loaded by importing `addon-test-support/load.js` and calling `loadEmberExam`:\n\n```js\n// test-helper.js\nimport loadEmberExam from 'ember-exam/test-support/load';\n\nloadEmberExam();\n```\n"
  },
  {
    "path": "docs-app/randomization-iterator.md",
    "content": "# Randomization Iterator\n\nRandomization can be helpful for identifying non-atomic or order-dependent tests. To that end, Ember Exam provides an iterator to make it easy to test lots of variations in your test suite order quickly.\n\n```bash\nember exam:iterate <num>\n```\n\nThis command will build your application once, and then run the test suite with the `random` option for the specified number of iterations. You can optionally skip the build by using a previous build via the `path` option:\n\n```bash\nember exam:iterate <num> --path <build-path>\n```\n\nFinally, you can pass additional options through to the exam command used to run the tests via the `options` flag:\n\n```bash\nember exam:iterate <num> --options <options>\n```\n\nThe `options` should be a string matching what you would use via the CLI.\n\n"
  },
  {
    "path": "docs-app/randomization.md",
    "content": "# Randomization\n\n```bash\nember exam --random[=<seed>]\n```\n\nThe `random` option allows you to randomize the order in which your tests run. You can optionally specify a \"seed\" value from which to randomize your tests in order to reproduce results. The seed can be any string value. Regardless of whether you specify a seed or not, Ember Exam will log the seed value used for the randomization at the beginning of the test run:\n\n```bash\nember exam --random\nRandomizing tests with seed: liv5d1ixkco6qlatl6o7mbo6r\n\nember exam --random=this_is1337\nRandomizing tests with seed: this_is1337\n```\n\nIf you use `random` without specifying a seed, it must be the last argument you pass. Otherwise, Ember Exam will attempt to interpret any following arguments as the seed value. In other words:\n\n```bash\n# don't do this\nember exam --random --split=2\nRandomizing tests with seed: --split=2 # this is not what we wanted\n\n# do this instead\nember exam --split=2 --random\nRandomizing tests with seed: hwr74nkk55vzpvi\n```\n\n_Note: You must be using QUnit version `1.23.0` or greater for this feature to work properly.\n"
  },
  {
    "path": "docs-app/split-parallel.md",
    "content": "# Split Test Parallelization\n\n```bash\nember exam --split=<num> --parallel\n```\n\nThe `parallel` option allows you to run your split tests across multiple test pages in parallel in [Testem](https://github.com/testem/testem). It will use a separate browser instance for each group of tests. So, if you specify a split of 3, then 3 browser instances will be spawned with the output looking something like:\n\n```bash\nok 1 PhantomJS 1.9 - Exam Partition 1 - some test\nok 2 PhantomJS 1.9 - Exam Partition 3 - some other other test\nok 3 PhantomJS 1.9 - Exam Partition 2 - some other test\n```\n\nYou can also combine the `parallel` option with the `partition` option to split tests, and then recombine partitions into parallel runs. This would, for example, allow you to run tests in multiple CI containers and have each CI container parallelize its list of tests.\n\nFor example, if you wanted to run your tests across two containers, but have one of them run twice as many tests as the other, and run them in parallel, you could do this:\n\n```bash\n# container 1\nember exam --split=3 --partition=1,2 --parallel\n```\n\n```bash\n# container 2\nember exam --split=3 --partition=3 --parallel\n```\n\n**Note 1**: _Ember Exam will respect the `parallel` setting of your [Testem config file](https://github.com/testem/testem/blob/master/docs/config_file.md#config-level-options) while running tests in parallel. The default value for `parallel` in Testem is 1, which means you'll need a non-default value to actually see parallel behavior._\n\n**Note 2**: _Ember Exam sets `process.env.EMBER_EXAM_SPLIT_COUNT` for convenience. You can use this in your Testem file._\n\n**Note 3**: _You must be using Testem version `1.5.0` or greater for this feature to work properly._\n"
  },
  {
    "path": "docs-app/splitting.md",
    "content": "# Splitting\n\n```bash\nember exam --split=<num>\n```\n\nThe `split` option allows you to specify the number of partitions greater than one to spread your tests across. Ember Exam will then proceed to run the first batch of tests.\n\n```bash\nember exam --split=<num> --partition=<num>\n```\n\nThe `partition` option allows you to specify which test group to run after using the `split` option. It is one-indexed, so if you specify a split of 3, the last group you could run is 3 as well. You can also run multiple partitions, e.g.:\n\n```bash\nember exam --split=4 --partition=1 --partition=2\n```\n\n_Note: Ember Exam splits tests by modifying the ember-qunit's `TestLoader` to bucket each test file into a partition, where each partition has an even number of test files. This makes it possible to have unbalanced partitions. To run your tests with balanced partitions, consider using `--load-balance`. For more info, see [_Test Load Balancing_](#test-load-balancing).\n"
  },
  {
    "path": "docs-app/test-suite-segmentation.md",
    "content": "# Test Suite Segmentation\n\nSome test suites like to segment which tests run based on various facets such as type of test, feature being tested, and so on. This can be accomplished by leveraging Testem's ability to have multiple test pages:\n\n```json\n{\n  \"test_page\": [\n    \"tests/index.html?filter=acceptance\",\n    \"tests/index.html?filter=!acceptance\"\n  ]\n}\n```\n\nYou can use this feature in conjunction with Ember Exam's features, which will allow you to segment your test suite but still gain benefits from randomization and splitting.\n"
  },
  {
    "path": "docs-app/tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"module\": \"esnext\",\n    \"target\": \"esnext\",\n    \"moduleResolution\": \"bundler\",\n    \"esModuleInterop\": true,\n    \"strict\": true,\n    \"skipLibCheck\": true,\n    \"noUnusedLocals\": true,\n    \"resolveJsonModule\": true,\n    \"verbatimModuleSyntax\": true,\n    \"jsx\": \"preserve\",\n    \"lib\": [\"esnext\", \"dom\", \"dom.iterable\"]\n  },\n  \"exclude\": [\n    \"**/node_modules/**\",\n    \"**/dist/**\",\n    \"template\",\n    \"bin\",\n    \"docs/snippets\",\n    \"scripts\"\n  ]\n}\n"
  },
  {
    "path": "ember-cli-build.js",
    "content": "'use strict';\n\nconst EmberAddon = require('ember-cli/lib/broccoli/ember-addon');\n\nmodule.exports = function (defaults) {\n  const self = defaults.project.findAddonByName('ember-exam');\n  const autoImport = self.options.autoImport;\n  let app = new EmberAddon(defaults, {\n    autoImport,\n    babel: {\n      plugins: [\n        // ... any other plugins\n        require.resolve('ember-concurrency/async-arrow-task-transform'),\n\n        // NOTE: put any code coverage plugins last, after the transform.\n      ],\n    },\n  });\n\n  const { maybeEmbroider } = require('@embroider/test-setup');\n  return maybeEmbroider(app, {});\n};\n"
  },
  {
    "path": "eslint.config.mjs",
    "content": "import globals from \"globals\";\nimport { ember } from \"ember-eslint\";\nimport * as url from \"url\";\n\n// Needed until Node 20\nconst dirname = url.fileURLToPath(new URL(\".\", import.meta.url));\n\nexport default [\n  ...ember.recommended(dirname),\n  {\n    name: \"monorepo-root:ignores\",\n    ignores: [\n      \"docs-app/**/*\",\n      \"test-apps/**/*\",\n      \"acceptance-dist/**/*\",\n      \"failure-dist/**/*\",\n      \"addon-test-support/index.d.ts\",\n    ],\n  },\n  {\n    name: \"monorepo-root:lib\",\n    files: [\"lib/**/*\"],\n    languageOptions: {\n      globals: {\n        ...globals.node,\n      },\n    },\n  },\n  {\n    name: \"monorepo-root:node-tests\",\n    files: [\"node-tests/**/*\"],\n    languageOptions: {\n      globals: {\n        ...globals.node,\n        ...globals.mocha,\n      },\n    },\n    rules: {\n      \"ember/no-test-support-import\": \"off\",\n    },\n  },\n];\n"
  },
  {
    "path": "index.js",
    "content": "/* eslint-env node */\n\n'use strict';\n\nmodule.exports = {\n  name: require('./package').name,\n\n  includedCommands() {\n    return require('./lib/commands');\n  },\n};\n"
  },
  {
    "path": "lib/commands/exam/iterate.js",
    "content": "'use strict';\n\nmodule.exports = {\n  name: 'exam:iterate',\n\n  description:\n    \"Runs your app's test suite in a random order for a number of iterations with the 'exam' command.\",\n\n  works: 'insideProject',\n\n  anonymousOptions: ['<iterations>'],\n\n  availableOptions: [\n    {\n      name: 'options',\n      type: String,\n      default: '',\n      description: 'A string of options to passthrough to the exam command',\n    },\n    {\n      name: 'path',\n      type: String,\n      default: '',\n      description: 'The output path of a previous build to run tests against',\n    },\n  ],\n\n  /**\n   * The output directory of the build used to run the test iterations.\n   *\n   * @type {String}\n   */\n  _outputDir: 'iteration-dist',\n\n  /**\n   * Runs `ember exam` with random seeds for a number of iterations. The results\n   * of each run are displayed in a table at the end of the command. This is\n   * useful for pre-emptively identifying flaky/non-atomic tests in an offline\n   * job.\n   *\n   * @override\n   */\n  async run(commandOptions, anonymousOptions) {\n    const needsBuild = !commandOptions.path;\n\n    if (needsBuild) {\n      await this._buildForTests();\n    } else {\n      this._outputDir = commandOptions.path;\n    }\n\n    const numIterations = parseInt(anonymousOptions[0], 10);\n    const options = commandOptions.options;\n    const results = await this._runIterations(numIterations, options);\n\n    if (needsBuild) {\n      await this._cleanupBuild();\n    }\n\n    await this._write(results.toString(), true);\n  },\n\n  /**\n   * Writes out a line with a standard color, unless specifically turned off.\n   *\n   * @param {String} input\n   * @param {Boolean} noColor\n   */\n  async _write(input, noColor) {\n    if (!noColor) {\n      const chalk = (await import('chalk')).default;\n      input = chalk.blue(input);\n    }\n\n    console.info(input);\n  },\n\n  /**\n   * Builds the application into a special output directory 
to run the tests\n   * against repeatedly without rebuilding.\n   */\n  async _buildForTests() {\n    await this._write('\\nBuilding app for test iterations.');\n    const { execa } = await import('execa');\n    await execa(\n      './node_modules/.bin/ember',\n      ['build', '--output-path', `${this._outputDir}`],\n      { stdio: 'inherit' },\n    );\n  },\n\n  /**\n   * Cleans up the build artifacts used for the test iterations.\n   */\n  async _cleanupBuild() {\n    await this._write('\\nCleaning up test iterations.\\n');\n    const { execa } = await import('execa');\n    await execa('rm', ['-rf', `${this._outputDir}`]);\n  },\n\n  /**\n   * Runs iterations of the test suite and returns a table to display the\n   * results.\n   *\n   * @param {Number} numIterations\n   * @param {String} options\n   * @return {Table} results\n   */\n  async _runIterations(numIterations, options) {\n    const chalk = (await import('chalk')).default;\n    const Table = require('cli-table3');\n\n    const results = new Table({\n      head: [\n        chalk.blue('Iteration'),\n        chalk.blue('Seed'),\n        chalk.blue('Exit Code'),\n        chalk.blue('Command'),\n      ],\n    });\n\n    for (let i = 0; i < numIterations; i++) {\n      await this._write('\\nRunning iteration #' + (i + 1) + '.');\n      const result = await this._runTests(options);\n      results.push([i].concat(result));\n    }\n\n    await this._write('\\nRan ' + numIterations + ' iterations.');\n\n    return results;\n  },\n\n  /**\n   * Runs the test suite in a random order while allowing additional options.\n   * Returns an array representing a row in the result table for _runIterations.\n   *\n   * @param {String} options\n   * @return {Array} results\n   */\n  async _runTests(options) {\n    const chalk = (await import('chalk')).default;\n    const execSync = require('child_process').execSync;\n\n    const seed = Math.random().toString(36).slice(2);\n    const command =\n      './node_modules/.bin/ember 
exam --random ' +\n      seed +\n      ' --path ' +\n      this._outputDir +\n      ' ' +\n      options;\n    let exitCode;\n\n    try {\n      execSync(command, { stdio: 'inherit' });\n      exitCode = 0;\n    } catch (error) {\n      await this._write('Returned non-zero exit code with error: ' + error);\n      exitCode = 1;\n      process.exitCode = 1;\n    }\n\n    const color = exitCode ? chalk.red : chalk.green;\n    return [color(seed), color(exitCode), color(command)];\n  },\n};\n"
  },
  {
    "path": "lib/commands/exam.js",
    "content": "'use strict';\n\nconst { addToQuery } = require('../utils/query-helper');\n// npmlog is used to write to testem server logs and `--testem-debug` enables to save the log file.\nconst log = require('npmlog');\nconst {\n  combineOptionValueIntoArray,\n  getBrowserId,\n  getMultipleTestPages,\n} = require('../utils/test-page-helper');\nconst TestemEvents = require('../utils/testem-events');\nconst TestCommand = require('ember-cli/lib/commands/test');\nconst TestServerTask = require('./task/test-server');\nconst TestTask = require('./task/test');\n\nmodule.exports = TestCommand.extend({\n  name: 'exam',\n\n  description: `Runs your app's test suite with more options than 'test'.`,\n\n  works: 'insideProject',\n\n  availableOptions: [\n    {\n      name: 'split',\n      type: Number,\n      description: 'A number of files to split your tests across.',\n    },\n    {\n      name: 'partition',\n      type: [Array, Number, String],\n      description: 'The number of the partition(s) to run after splitting.',\n    },\n    {\n      name: 'parallel',\n      type: [Number, String],\n      description: 'Runs your split tests on parallel child processes.',\n    },\n    {\n      name: 'load-balance',\n      type: Boolean,\n      default: false,\n      description:\n        'Load balance test modules. 
Test modules will be sorted by weight from slowest (acceptance) to fastest (unit).',\n    },\n    {\n      name: 'preserve-test-name',\n      type: Boolean,\n      default: false,\n      aliases: ['ptn'],\n      description:\n        'Preserve the test name when using load balance or split by omitting the partition and browser numbers.',\n    },\n    {\n      name: 'random',\n      type: String,\n      default: false,\n      description:\n        'Randomizes your modules and tests while running your test suite.',\n    },\n    {\n      name: 'module-path',\n      type: [String],\n      aliases: ['mp'],\n      description:\n        'Filters the list of modules to only those that matches by module paths, the value accepts either string or regex.',\n    },\n    {\n      name: 'file-path',\n      type: [String],\n      aliases: ['fp'],\n      description:\n        'Filters the list of modules to only those that matches by test file paths, the value accepts either string or regex.',\n    },\n    {\n      name: 'replay-execution',\n      type: String,\n      default: false,\n      aliases: ['re'],\n      description:\n        'A JSON file path which maps from browser id(s) to a list of modules',\n    },\n    {\n      name: 'replay-browser',\n      type: [Array, Number, String],\n      aliases: ['rb'],\n      description: 'The browser id(s) to replay from the replay-execution file',\n    },\n    {\n      name: 'write-execution-file',\n      type: Boolean,\n      default: false,\n      aliases: ['wef'],\n      description:\n        'Allows writing a test-execution json file after running your test suite',\n    },\n    {\n      name: 'write-module-metadata-file',\n      type: Boolean,\n      default: false,\n      aliases: ['wmmf'],\n      description:\n        'Allows writing a module metadata json file after running your test suite',\n    },\n  ].concat(TestCommand.prototype.availableOptions),\n\n  init() {\n    this._super(...arguments);\n    this.tasks.Test = TestTask;\n 
   this.tasks.TestServer = TestServerTask;\n    this.testemEvents = new TestemEvents(this.project.root);\n    this.emberCliVersion =\n      this.project.pkg.devDependencies['ember-cli'] ||\n      this.project.pkg.dependencies['ember-cli'];\n  },\n\n  /**\n   *  Validates commandOptions\n   *\n   * @private\n   * @param {Object} commandOptions\n   * @return {Object} A map of what switches are enabled\n   */\n  _validateOptions(commandOptions) {\n    const Validator = require('../utils/tests-options-validator');\n    const validator = new Validator(commandOptions, this.emberCliVersion);\n    return validator.validateCommands();\n  },\n\n  /**\n   * Validates the command options and then runs the original test command.\n   *\n   * @param {Object} commandOptions\n   * @override\n   */\n  run(commandOptions) {\n    this.commands = this._validateOptions(commandOptions);\n\n    // TODO: explore not mutating the commandOptions input\n    if (commandOptions.split) {\n      commandOptions.query = addToQuery(\n        commandOptions.query,\n        'split',\n        commandOptions.split,\n      );\n\n      process.env.EMBER_EXAM_SPLIT_COUNT = commandOptions.split;\n\n      // Ignore the partition option when paralleling (we'll fill it in later)\n      if (!commandOptions.parallel && commandOptions.partition) {\n        const partitions = combineOptionValueIntoArray(\n          commandOptions.partition,\n        );\n        for (let i = 0; i < partitions.length; i++) {\n          commandOptions.query = addToQuery(\n            commandOptions.query,\n            'partition',\n            partitions[i],\n          );\n        }\n      }\n    }\n\n    if (commandOptions.modulePath) {\n      commandOptions.query = addToQuery(\n        commandOptions.query,\n        'modulePath',\n        commandOptions.modulePath,\n      );\n    }\n\n    if (commandOptions.preserveTestName) {\n      commandOptions.query = addToQuery(\n        commandOptions.query,\n        'preserveTestName',\n    
    commandOptions.preserveTestName,\n      );\n    }\n\n    if (commandOptions.filePath) {\n      commandOptions.query = addToQuery(\n        commandOptions.query,\n        'filePath',\n        commandOptions.filePath,\n      );\n    }\n\n    if (commandOptions.loadBalance) {\n      commandOptions.query = addToQuery(\n        commandOptions.query,\n        'loadBalance',\n        commandOptions.loadBalance,\n      );\n    }\n\n    if (commandOptions.replayBrowser) {\n      commandOptions.replayBrowser = combineOptionValueIntoArray(\n        commandOptions.replayBrowser,\n      );\n    }\n\n    if (typeof commandOptions.random !== 'undefined') {\n      commandOptions.query = this._randomize(\n        commandOptions.random,\n        commandOptions.query,\n      );\n    }\n\n    return this._super.run.apply(this, arguments);\n  },\n\n  /**\n   * Adds a `seed` param to the `query` to support randomization. Currently\n   * only works with QUnit.\n   *\n   * @param {string} random\n   * @param {string} query\n   * @return {string}\n   */\n  _randomize(random, query) {\n    const seed = random !== '' ? random : Math.random().toString(36).slice(2);\n\n    this.ui.writeLine('Randomizing tests with seed: ' + seed);\n\n    return addToQuery(query, 'seed', seed);\n  },\n\n  /**\n   * Customizes the Testem config to have multiple test pages if attempting to\n   * run in parallel or load-balance. 
It is important that the user specifies\n   * the number of launchers to run in parallel in their testem.json config.\n   *\n   * @param {Object} commandOptions\n   * @override\n   */\n  _generateCustomConfigs(commandOptions) {\n    const config = this._super._generateCustomConfigs.apply(this, arguments);\n    let additionalEvents = this._setupAndGetBrowserSocketEvents(config);\n\n    if (commandOptions.loadBalance || commandOptions.replayExecution) {\n      const loadBalancingEvents = this._getLoadBalancingBrowserSocketEvents(\n        {\n          isLoadBalance: this.commands.get('loadBalance'),\n          isReplayExecution: this.commands.get('replayExecution'),\n          isWriteExecutionFile: this.commands.get('writeExecutionFile'),\n        },\n        this.testemEvents,\n      );\n      additionalEvents = Object.assign(additionalEvents, loadBalancingEvents);\n    }\n\n    config.custom_browser_socket_events = Object.assign(\n      config.custom_browser_socket_events || {},\n      additionalEvents,\n    );\n\n    if (\n      !commandOptions.loadBalance &&\n      !commandOptions.replayExecution &&\n      !commandOptions.parallel\n    )\n      return config;\n\n    config.testPage = getMultipleTestPages(config, commandOptions);\n\n    if (commandOptions.replayExecution) {\n      this.testemEvents.setReplayExecutionMap(\n        commandOptions.replayExecution,\n        commandOptions.replayBrowser,\n      );\n    }\n\n    return config;\n  },\n\n  /**\n   * Returns an event object to enable to send and receive module metadata\n   *\n   * @param {Object} config\n   */\n  _setupAndGetBrowserSocketEvents(config) {\n    const commands = this.commands;\n    const testemEvents = this.testemEvents;\n    const ui = this.ui;\n\n    const browserExitHandler = function (failed = false) {\n      const launcherId = this.launcher.id;\n      if (!failed && commands.get('loadBalance')) {\n        const browserId = getBrowserId(this.launcher);\n        log.info(\n          
`Browser ${browserId} exiting. [ # of modules in current module queue ${\n            testemEvents.stateManager.getTestModuleQueue().length\n          } ]`,\n        );\n        // if getBrowserId cannot get the browserId\n        // but the test queue is not empty, report the number of test modules left in the queue\n        // otherwise, fail because testModuleQueue was not set\n        if (browserId === 0) {\n          if (testemEvents.stateManager.getTestModuleQueue() !== null) {\n            ui.writeLine(\n              `[ # of modules in current module queue ${\n                testemEvents.stateManager.getTestModuleQueue().length\n              } ]`,\n            );\n          } else {\n            throw new Error('testModuleQueue is not set.');\n          }\n        }\n      }\n\n      // config.testPage is undefined when parallization options are not used\n      // Set browserCount default value to 1 to allow\n      let browserCount = 1;\n      // When using multiple browsers config.testPage is an array of test page urls.\n      if (typeof config.testPage !== 'undefined') {\n        browserCount = Object.keys(config.testPage).length;\n      }\n\n      testemEvents.completedBrowsersHandler(\n        browserCount,\n        launcherId,\n        ui,\n        commands,\n        Date.now(),\n      );\n    };\n\n    const browserTerminationHandler = function () {\n      // browserTerminationHandler is called for disconnect, processError or processExit events.\n      // disconnect and processExit events is fired during global error and successful test runs.\n      // On successful test runs, browserExitHandler should already be called. And is unnecessary\n      // to call it again, so we should return. This is covered by this.finish = true\n      // On global failure cases, it's possible that this.finish is also true. 
So we must check\n      // the timers set by onProcessExit\n      // https://github.com/testem/testem/blob/master/lib/runners/browser_test_runner.js#L266\n      // or onProcessError in testem.\n      // https://github.com/testem/testem/blob/master/lib/runners/browser_test_runner.js#L252\n      // If either timers is set, we should record the failed browser and call browserExitHandler\n      if (this.finished && !this.onProcessExitTimer && !this.pendingTimer) {\n        return;\n      }\n      if (commands.get('writeExecutionFile')) {\n        testemEvents.recordFailedBrowserId(this.launcher, ui);\n      }\n\n      browserExitHandler.call(this, true);\n    };\n\n    return this._getModuleMetadataAndBrowserExitSocketEvents(\n      browserExitHandler,\n      browserTerminationHandler,\n    );\n  },\n\n  /**\n   * Add browser socket events are needed for both with load-balance and without load-balance\n   *\n   * @param {Object} browserExitHandler\n   * @param {Object} browserTerminationHandler\n   */\n  _getModuleMetadataAndBrowserExitSocketEvents(\n    browserExitHandler,\n    browserTerminationHandler,\n  ) {\n    const events = {};\n    const testemEvents = this.testemEvents;\n    let init = false;\n\n    events['tests-start'] = function () {\n      if (!init) {\n        // process object is instantiated only when browsers are launched by testem server.\n        // 1. `ember test/exam` where browsers are instantiated by testem - process is available\n        // 2. `ember test/exam --server` where browsers can be instantiated by testem or manually\n        // - process is available only when browsers are instantiated by testem\n        // 3. `ember test/exam --serve --no-launch` where browsers are instantiated manually - process is undefined\n        // 4. 
`ember serve` where browsers are instantiated manually by developer - process is available.\n        if (typeof this.process !== 'undefined' && this.process !== null) {\n          this.process.on('processExit', browserTerminationHandler.bind(this));\n          this.process.on('processError', browserTerminationHandler.bind(this));\n        }\n        init = true;\n      }\n\n      if (typeof this.launcher !== 'undefined' && this.launcher !== null) {\n        testemEvents.recordStartedLauncherId(this.launcher.id);\n      }\n    };\n\n    events['after-tests-complete'] = browserExitHandler;\n\n    events['disconnect'] = function () {\n      // To prevent handling exiting browser browser disconnects from errors `disconnect` callback's needed to be registered.\n      browserTerminationHandler.bind(this)();\n    };\n\n    events['testem:test-done-metadata'] = (details) => {\n      // Ensure module detail is available\n      if (typeof details === 'object' && details !== null) {\n        //store module name, test name, # of failed assertion, and duration.\n        this.testemEvents.recordModuleMetadata({\n          moduleName: details.module,\n          testName: details.name,\n          passed: details.passed == details.total,\n          failed: details.failed > 0,\n          skipped: details.skipped,\n          duration: details.runtime,\n        });\n      }\n    };\n\n    return events;\n  },\n\n  /**\n   * Return an event object which enables load balancing.\n   * These event handlers will be registered on Testem's browserTestRunner socket instance\n   *\n   * @param {Object} commands\n   * @param {Object} testemEvents\n   */\n  _getLoadBalancingBrowserSocketEvents(\n    { isLoadBalance, isReplayExecution, isWriteExecutionFile },\n    testemEvents,\n  ) {\n    const events = {};\n    const ui = this.ui;\n\n    events['testem:set-modules-queue'] = function (modules, browserId) {\n      testemEvents.setModuleQueue(\n        browserId,\n        modules,\n        
isLoadBalance,\n        isReplayExecution,\n      );\n    };\n    events['testem:next-module-request'] = function (browserId) {\n      testemEvents.nextModuleResponse(\n        browserId,\n        this.socket,\n        isWriteExecutionFile,\n      );\n    };\n    events['test-result'] = function (result) {\n      if (result.failed && isWriteExecutionFile) {\n        testemEvents.recordFailedBrowserId(this.launcher, ui);\n      }\n    };\n\n    return events;\n  },\n});\n"
  },
  {
    "path": "lib/commands/index.js",
    "content": "'use strict';\n\nmodule.exports = {\n  exam: require('./exam'),\n  'exam:iterate': require('./exam/iterate'),\n};\n"
  },
  {
    "path": "lib/commands/task/test-server.js",
    "content": "const TestServerTask = require('ember-cli/lib/tasks/test-server');\n\nmodule.exports = TestServerTask.extend({\n  transformOptions(options) {\n    const transformOptions = this._super(...arguments);\n    transformOptions.custom_browser_socket_events =\n      options.custom_browser_socket_events;\n    transformOptions.browser_module_mapping = options.browser_module_mapping;\n\n    return transformOptions;\n  },\n});\n"
  },
  {
    "path": "lib/commands/task/test.js",
    "content": "const TestTask = require('ember-cli/lib/tasks/test');\n\nmodule.exports = TestTask.extend({\n  transformOptions(options) {\n    const transformOptions = this._super(...arguments);\n    transformOptions.custom_browser_socket_events =\n      options.custom_browser_socket_events;\n    transformOptions.browser_module_mapping = options.browser_module_mapping;\n\n    if (options.loadBalance) {\n      /**\n       * the parallel option is how testem knows to boot browsers simultaneously.\n       * setting testPage to an array isn't enough.\n       * default behavior is 1 browser at a time, which defeats the purpose of loadBalance.\n       */\n      transformOptions.parallel = options.testPage.length;\n    }\n\n    return transformOptions;\n  },\n});\n"
  },
  {
    "path": "lib/utils/config-reader.js",
    "content": "'use strict';\n\nconst fs = require('fs-extra');\nconst yaml = require('js-yaml');\nconst path = require('path');\nconst debug = require('debug')('exam:config-reader');\n\nconst potentialConfigFiles = ['testem.js', 'testem.json', 'testem.cjs'];\n\n/**\n * Given an array of file paths, returns the first one that exists and is\n * accessible. Paths are relative to the process' cwd.\n *\n * @param {Array<string>} files\n * @return {string} file\n */\nfunction _findValidFile(files) {\n  for (let i = 0; i < files.length; i++) {\n    // TODO: investigate this cwd() usually they are in-error...\n    const file = path.join(process.cwd(), files[i]);\n    try {\n      fs.accessSync(file, fs.F_OK);\n      return file;\n    } catch (error) {\n      debug(`Failed to find ${file} due to error: ${error}`);\n      continue;\n    }\n  }\n}\n\n/**\n * Reads in a given file according to it's 'type' as determined by file\n * extension. Supported types are `js` and `json`.\n *\n * @param {string} file\n * @return {Object} fileContents\n */\nfunction _readFileByType(file) {\n  if (typeof file === 'string') {\n    const fileType = file.split('.').pop();\n    switch (fileType) {\n      case 'js':\n      case 'cjs':\n        return require(file);\n      case 'json':\n        return fs.readJsonSync(file);\n      case 'yaml':\n        return yaml.load(fs.readFileSync(file));\n      default:\n        throw new Error(`Unrecognized file extension for: ${file}`);\n    }\n  }\n}\n\n/**\n * Gets the application's testem config by trying a custom file first and then\n * defaulting to either `testem.js` or `testem.json`.\n *\n * @param {string} file\n * @param {Array<string>} potentialFiles\n * @return {Object} config\n */\nmodule.exports = function readTestemConfig(\n  file,\n  potentialFiles = potentialConfigFiles,\n) {\n  if (file) {\n    potentialFiles.unshift(file);\n  }\n\n  const configFile = _findValidFile(potentialFiles);\n\n  return configFile && 
_readFileByType(configFile);\n};\n"
  },
  {
    "path": "lib/utils/execution-state-manager.js",
    "content": "'use strict';\n\n/**\n * A class to store the state of an execution.\n *\n * @class ExecutionStateManager\n */\nclass ExecutionStateManager {\n  constructor(replayExecutionMap) {\n    // A set of launcher id of attached browsers\n    this._startedLaunchers = new Set();\n\n    // A map of browserId to test modules executed on that browser read from test-execution.json.\n    this._replayExecutionMap = replayExecutionMap || null;\n\n    // A map of browserId to test modules executed for the current test execution.\n    this._browserToModuleMap = new Map();\n\n    // A map keeping the module execution details\n    this._moduleMetadata = new Map();\n\n    // An array keeping the browserId of a browser with failing test\n    this._failedBrowsers = [];\n    this._completedBrowsers = new Map();\n\n    // An array of modules to load balance against browsers. This is used by `--load-balance`\n    this._testModuleQueue = null;\n\n    // A map of browserId to an array of test modules. 
This is used by `--replay-execution`\n    this._replayExecutionModuleQueue = null;\n  }\n\n  /**\n   * Returns the startedLaunchers\n   *\n   * @returns {Set}\n   */\n  getStartedLaunchers() {\n    return this._startedLaunchers;\n  }\n\n  /**\n   * Add a new launcher id to the startedLaunchers set.\n   *\n   * @param {number} launcherId\n   * @returns {Set}\n   */\n  addToStartedLaunchers(launcherId) {\n    return this._startedLaunchers.add(launcherId);\n  }\n\n  /**\n   * Returns the replayExecutionMap\n   *\n   * @returns {Object}\n   */\n  getReplayExecutionMap() {\n    return this._replayExecutionMap;\n  }\n\n  /**\n   * Sets the replayExecutionMap\n   *\n   * @param {Object} replayModuleMap\n   */\n  setReplayExecutionMap(replayModuleMap) {\n    this._replayExecutionMap = replayModuleMap;\n  }\n\n  /**\n   * Returns the testModuleQueue\n   *\n   * @returns {Object}\n   */\n  getTestModuleQueue() {\n    return this._testModuleQueue;\n  }\n\n  /**\n   * Sets the shared module queue.\n   *\n   * @param {Object} moduleQueue\n   */\n  setTestModuleQueue(moduleQueue) {\n    this._testModuleQueue = moduleQueue;\n  }\n\n  /**\n   * Gets the next module from the shared module queue\n   *\n   * @returns {string}\n   */\n  getNextModuleTestModuleQueue() {\n    if (this._testModuleQueue) {\n      return this._testModuleQueue.shift();\n    }\n    return null;\n  }\n\n  /**\n   * Returns the array of modules belonging to browserId\n   *\n   * @param {number} browserId\n   * @returns {Array<number>}\n   */\n  getReplayExecutionModuleQueue(browserId) {\n    if (this._replayExecutionModuleQueue) {\n      return this._replayExecutionModuleQueue.get(browserId);\n    }\n    return null;\n  }\n\n  /**\n   * Sets the array of modules in browser module queue for browserId\n   *\n   * @param {Array<string>} moduleQueue\n   * @param {number} browserId\n   */\n  setReplayExecutionModuleQueue(moduleQueue, browserId) {\n    if (!this._replayExecutionModuleQueue) {\n      
this._replayExecutionModuleQueue = new Map();\n    }\n    this._replayExecutionModuleQueue.set(browserId, moduleQueue.slice());\n  }\n\n  /**\n   * Gets the next module from the module array of browserId\n   *\n   * @param {number} browserId\n   * @returns {string}\n   */\n  getNextModuleReplayExecutionModuleQueue(browserId) {\n    if (\n      this._replayExecutionModuleQueue &&\n      this._replayExecutionModuleQueue.get(browserId)\n    ) {\n      return this._replayExecutionModuleQueue.get(browserId).shift();\n    }\n    return null;\n  }\n\n  /**\n   * Returns the failedBrowsers array\n   *\n   * @returns {Array<number>}\n   */\n  getFailedBrowsers() {\n    return this._failedBrowsers;\n  }\n\n  /**\n   * Returns whether or not the browserId is contained in the failedBrowsers array.\n   *\n   * @param {number} browserId\n   * @returns {Boolean}\n   */\n  containsFailedBrowser(browserId) {\n    return this._failedBrowsers.includes(browserId);\n  }\n\n  /**\n   * Add a new browserId to the failedBrowsers array.\n   *\n   * @param {number} browserId\n   * @returns {number}\n   */\n  addFailedBrowsers(browserId) {\n    return this._failedBrowsers.push(browserId);\n  }\n\n  /**\n   * Returns the map of browserId to modules array\n   *\n   * @returns {Map}\n   */\n  getModuleMap() {\n    return this._browserToModuleMap;\n  }\n\n  /**\n   * Returns a map of module run details\n   *\n   * @returns {Map}\n   */\n  getModuleMetadata() {\n    return this._moduleMetadata;\n  }\n\n  /**\n   * Pushes the moduleName into the moduleArray of browserId\n   *\n   * @param {string} moduleName\n   * @param {number} browserId\n   */\n  addModuleNameToReplayExecutionMap(moduleName, browserId) {\n    let browserModuleList = this._browserToModuleMap.get(browserId);\n    if (Array.isArray(browserModuleList)) {\n      browserModuleList.push(moduleName);\n    } else {\n      browserModuleList = [moduleName];\n    }\n    this._browserToModuleMap.set(browserId, browserModuleList);\n  
}\n\n  /**\n   * Add module metadata mapped by moduleName to moduleMetadata Map.\n   *\n   * @param {string} moduleName\n   * @param {number} total - Total number of tests\n   * @param {number} passed - Number of passed tests\n   * @param {number} failed - Number of failed tests\n   * @param {number} duration - duration to execute tests in module in ms\n   * @param {Array<string>} failedTests - A list of failed test names\n   */\n  _injectModuleMetadata(\n    moduleName,\n    total,\n    passed,\n    failed,\n    skipped,\n    duration,\n    failedTests,\n  ) {\n    this._moduleMetadata.set(moduleName, {\n      moduleName,\n      total,\n      passed,\n      failed,\n      skipped,\n      duration,\n      failedTests,\n    });\n  }\n\n  /**\n   * Pushes the module detail into the moduleMetadata array\n   *\n   * @param {Object} metaData\n   */\n  addToModuleMetadata(metadata) {\n    if (!this._moduleMetadata.has(metadata.moduleName)) {\n      // modulename, total, passed, failed, skipped, duration, failed tests\n      this._injectModuleMetadata(metadata.moduleName, 0, 0, 0, 0, 0, []);\n    }\n\n    const curModuleMetadata = this._moduleMetadata.get(metadata.moduleName);\n\n    if (!metadata.skipped && metadata.failed) {\n      curModuleMetadata.failedTests.push(metadata.testName);\n    }\n\n    this._injectModuleMetadata(\n      metadata.moduleName,\n      curModuleMetadata.total + 1,\n      !metadata.skipped && metadata.passed\n        ? curModuleMetadata.passed + 1\n        : curModuleMetadata.passed,\n      !metadata.skipped && metadata.failed\n        ? curModuleMetadata.failed + 1\n        : curModuleMetadata.failed,\n      metadata.skipped\n        ? 
curModuleMetadata.skipped + 1\n        : curModuleMetadata.skipped,\n      curModuleMetadata.duration + metadata.duration,\n      curModuleMetadata.failedTests,\n    );\n  }\n\n  /**\n   * Returns the number of completed browsers\n   *\n   * @returns {number}\n   */\n  getCompletedBrowser() {\n    return this._completedBrowsers.size;\n  }\n\n  /**\n   * Book keep the browser id that has completed\n   *\n   * @param {number} browserId\n   */\n  incrementCompletedBrowsers(browserId) {\n    this._completedBrowsers.set(browserId, true);\n  }\n}\n\nmodule.exports = ExecutionStateManager;\n"
  },
  {
    "path": "lib/utils/file-system-helper.js",
    "content": "const fs = require('fs-extra');\n\n/**\n * Creates a file with targetJsonObject\n *\n * @param {string} fileName\n * @param {Object} targetJsonObject\n * @param {Object} option\n */\nmodule.exports = function writeJsonToFile(\n  fileName,\n  targetJsonObject,\n  option = {},\n) {\n  try {\n    fs.writeJsonSync(fileName, targetJsonObject, option);\n  } catch (err) {\n    if (typeof err === 'object' && err !== null) {\n      err.file = err.file || fileName;\n    }\n    throw err;\n  }\n};\n"
  },
  {
    "path": "lib/utils/query-helper.js",
    "content": "'use strict';\n\n/**\n * Creates a valid query string by appending a given param and value to query.\n *\n * @param {string} query\n * @param {string} param\n * @param {string} value\n */\nfunction addToQuery(query, param, value) {\n  if (!value) {\n    return query;\n  }\n\n  const queryAddParam = query ? query + '&' + param : param;\n\n  return value !== true ? queryAddParam + '=' + value : queryAddParam;\n}\n\n/**\n * Adds a valid query string to a given url.\n *\n * @param {string} url\n * @param {string} param\n * @param {string} value\n */\nfunction addToUrl(url, param, value) {\n  const urlParts = url.split('?');\n  const base = urlParts[0];\n  const query = urlParts[1];\n\n  return base + '?' + addToQuery(query, param, value);\n}\n\nmodule.exports = {\n  addToQuery,\n  addToUrl,\n};\n"
  },
  {
    "path": "lib/utils/test-page-helper.js",
    "content": "'use strict';\n\nconst fs = require('fs-extra');\nconst readTestemConfig = require('../utils/config-reader');\nconst { addToUrl } = require('./query-helper');\n\n/**\n * Add paramater such as split, loadbalance or partition to a base url if options are valid.\n *\n * @param {Object} commandOptions\n * @param {string} baseUrl\n * @return {string} baseUrl\n */\nfunction _appendParamToBaseUrl(commandOptions, baseUrl) {\n  if (commandOptions.parallel || commandOptions.split) {\n    baseUrl = addToUrl(baseUrl, 'split', commandOptions.split);\n  }\n  // `loadBalance` is added to url when running replay-execution in order to emit `set-module-queue` in patch-test-loader.\n  if (commandOptions.loadBalance || commandOptions.replayExecution) {\n    const partitions = commandOptions.partition;\n    baseUrl = addToUrl(baseUrl, 'loadBalance', true);\n    if (partitions) {\n      for (let i = 0; i < partitions.length; i++) {\n        baseUrl = addToUrl(baseUrl, 'partition', partitions[i]);\n      }\n    }\n  }\n\n  return baseUrl;\n}\n\n/**\n * Generates an array by parsing optionValue. 
optionValue can be in a string form of '1,2', '3..5'\n * or '1,3..5' where '3..5' indicates a number sequence starting from 2 to 5.\n *\n * @param {string} optionValue\n * @return {Array<number>}\n */\nfunction _formatStringOptionValue(optionValue) {\n  let valueArray = [];\n\n  optionValue.split(',').forEach(function (val) {\n    if (val.indexOf('..') > 0) {\n      const arr = val.split('..');\n      const filledArray = _getFilledArray(arr.shift(), arr.pop());\n      valueArray = valueArray.concat(filledArray);\n    } else {\n      valueArray.push(val);\n    }\n  });\n\n  return valueArray;\n}\n\n/**\n * Generates multiple test pages: for a given baseUrl, it appends the partition numbers\n * or the browserId each page is running as query params.\n *\n * @param {string} customBaseUrl\n * @param {string} appendingParam\n * @param {Array<number} browserIds\n * @return {Array<string>} testPages\n */\nfunction _generateTestPages(customBaseUrl, appendingParam, browserIds) {\n  const testPages = [];\n  for (let i = 0; i < browserIds.length; i++) {\n    const url = addToUrl(customBaseUrl, appendingParam, browserIds[i]);\n    testPages.push(url);\n  }\n\n  return testPages;\n}\n\n/**\n * Creates an array of numbers between the range of start to end.\n *\n * @param {number} start\n * @param {number} end\n * @return {Array}\n */\nfunction _getFilledArray(start, end) {\n  const length = end - start + 1;\n  return Array.from({ length }, (_, i) => i + Number(start));\n}\n\n/**\n * returns the failed browsers from the test execution json defined in executionJsonPath\n * other wise return an array of 1 to number of browsers spawned during the execution\n *\n * @param {string} executionJsonPath\n * @return {Array<number>} testPages\n */\nfunction _getReplayBrowsers(executionJsonPath) {\n  const executionJson = fs.readJsonSync(executionJsonPath);\n\n  if (executionJson.failedBrowsers.length > 0) {\n    return executionJson.failedBrowsers;\n  }\n  return _getFilledArray(1, 
executionJson.numberOfBrowsers);\n}\n\n/**\n * Returns an array populated with numeric values represented by the optionValue.\n * e.g. [1, '2,3'] => [1, 2, 3], [1, '3..6'] => [1, 3, 4, 5, 6]\n *\n * @param {*} optionValue\n * @return {Array<number}\n */\nfunction combineOptionValueIntoArray(optionValue) {\n  if (!optionValue) return [];\n\n  let optionArray = Array.isArray(optionValue) ? optionValue : [optionValue];\n\n  return optionArray.reduce((result, element) => {\n    if (typeof element === 'string') {\n      return result.concat(_formatStringOptionValue(element));\n    }\n    return result.concat(element);\n  }, []);\n}\n\n/**\n * Returns the browserId of launcher\n *\n * @param {Object} launcher\n * @return {string}\n */\nfunction getBrowserId(launcher) {\n  try {\n    const testPage = launcher.settings.test_page;\n    const browserIdMatch = /browser=\\s*([0-9]*)/.exec(testPage);\n\n    if (Array.isArray(browserIdMatch) !== null && browserIdMatch !== null) {\n      return browserIdMatch[1];\n    }\n  } catch (err) {\n    const errMsg = `${err.message} \\n${\n      err.stack\n    } \\nLauncher Settings: ${JSON.stringify(launcher.settings, null, 2)}`;\n    console.warn(errMsg);\n  }\n  return 0;\n}\n\n/**\n * Gets a test url in testem config to modify the url in order to generate multiple test pages\n *\n * @param {Object} configFile\n * @return {string} testPage\n */\nfunction getTestUrlFromTestemConfig(configFile) {\n  // Attempt to read in the testem config and use the test_page definition\n  const testemConfig = readTestemConfig(configFile);\n  let testPage = testemConfig && testemConfig.test_page;\n\n  // If there is no test_page to use as the testPage, we warn that we're using\n  // a default value\n  if (!testPage) {\n    console.warn(\n      'No test_page value found in the config. 
Defaulting to \"tests/index.html?hidepassed\"',\n    );\n    testPage = 'tests/index.html?hidepassed';\n  }\n\n  // Get the testPage from the generated config or the Testem config and\n  // use it as the baseUrl to customize for the parallelized test pages or test load balancing\n  return testPage;\n}\n\n/**\n * Creates an array of custom base urls by appending options that are specified\n *\n * @param {Object} commandOptions\n * @param {*} baseUrl\n * @return {string}\n */\nfunction getCustomBaseUrl(commandOptions, baseUrl) {\n  if (Array.isArray(baseUrl)) {\n    return baseUrl.map((currentUrl) => {\n      return _appendParamToBaseUrl(commandOptions, currentUrl);\n    });\n  } else {\n    return _appendParamToBaseUrl(commandOptions, baseUrl);\n  }\n}\n\n/**\n * Ember-exam allows serving multiple browsers to run test suite. In order to acheive that test_page in testem config\n * has to be set with an array of multiple urls reflecting to command passed.\n *\n * @param {Object} config\n * @param {Object} commandOptions\n * @return {Array<string>} testPages\n */\nfunction getMultipleTestPages(config, commandOptions) {\n  let testPages = Object.create(null);\n  let browserIds = combineOptionValueIntoArray(commandOptions.partition);\n  let appendingParam = 'partition';\n\n  if (commandOptions.loadBalance) {\n    appendingParam = 'browser';\n    browserIds = _getFilledArray(1, commandOptions.parallel);\n  } else if (commandOptions.parallel === 1 && browserIds.length === 0) {\n    browserIds = _getFilledArray(1, commandOptions.split);\n  } else if (commandOptions.replayExecution) {\n    appendingParam = 'browser';\n    browserIds = combineOptionValueIntoArray(commandOptions.replayBrowser);\n    if (browserIds.length === 0) {\n      browserIds = _getReplayBrowsers(commandOptions.replayExecution);\n    }\n  }\n\n  const baseUrl =\n    config.testPage || getTestUrlFromTestemConfig(commandOptions.configFile);\n  const customBaseUrl = getCustomBaseUrl(commandOptions, 
baseUrl);\n\n  if (Array.isArray(customBaseUrl)) {\n    testPages = customBaseUrl.reduce(function (testPages, customBaseUrl) {\n      return testPages.concat(\n        _generateTestPages(customBaseUrl, appendingParam, browserIds),\n      );\n    }, []);\n  } else {\n    testPages = _generateTestPages(customBaseUrl, appendingParam, browserIds);\n  }\n\n  return testPages;\n}\n\nmodule.exports = {\n  combineOptionValueIntoArray,\n  getBrowserId,\n  getCustomBaseUrl,\n  getMultipleTestPages,\n  getTestUrlFromTestemConfig,\n};\n"
  },
  {
    "path": "lib/utils/testem-events.js",
    "content": "'use strict';\n\nconst fs = require('fs-extra');\nconst path = require('path');\nconst ExecutionStateManager = require('./execution-state-manager');\nconst { getBrowserId } = require('../utils/test-page-helper');\nconst writeJsonToFile = require('./file-system-helper');\n\n/**\n * Return sorted module metadata object by module duration.\n *\n * @param {Map} moduleMetadata\n */\nfunction getSortedModuleMetaData(moduleMetadata) {\n  return new Map(\n    [...moduleMetadata.entries()].sort((a, b) => b[1].duration - a[1].duration),\n  );\n}\n\n/**\n * A class to coordinate testem events to enable load-balance functionality.\n *\n * @class TestemEvents\n */\nclass TestemEvents {\n  constructor(root) {\n    this.stateManager = new ExecutionStateManager();\n    this.root = root;\n  }\n\n  /**\n   * Read the executionFilePath then:\n   * if failed browsers are available, set the module map to the modules from the failed browsers.\n   * else if replay-browser param is passed, set the module map specified browser id\n   * else set module map to all the browser ran, effectively rerunning the same execution\n   *\n   * @param {string} executionFilePath\n   * @param {Array<number>} browserIdsToReplay\n   * @return {Object}\n   */\n  setReplayExecutionMap(executionFilePath, browserIdsToReplay) {\n    const browserModuleMap = new Map();\n    let executionJson;\n\n    try {\n      executionJson = fs.readJsonSync(executionFilePath);\n    } catch (err) {\n      throw new Error(`Error reading reply execution JSON file - ${err}`);\n    }\n\n    if (browserIdsToReplay && browserIdsToReplay.length > 0) {\n      browserIdsToReplay.forEach((browserId) => {\n        browserModuleMap.set(\n          browserId.toString(),\n          executionJson.executionMapping[browserId.toString()],\n        );\n      });\n    } else if (executionJson.failedBrowsers.length > 0) {\n      executionJson.failedBrowsers.forEach((browserId) => {\n        browserModuleMap.set(\n          
browserId,\n          executionJson.executionMapping[browserId],\n        );\n      });\n    } else {\n      for (\n        let browserId = 1;\n        browserId <= executionJson.numberOfBrowsers;\n        browserId++\n      ) {\n        browserModuleMap.set(\n          browserId.toString(),\n          executionJson.executionMapping[browserId.toString()],\n        );\n      }\n    }\n\n    this.stateManager.setReplayExecutionMap(browserModuleMap);\n  }\n\n  /**\n   * Set the moduleQueue, a list of test modules to be passed to browsers to execute.\n   *\n   * @param {number} browserId\n   * @param {Array<string>} modules\n   * @param {boolean} loadBalance\n   * @param {boolean} replayExecution\n   * @param {string} writeExecutionFile\n   */\n  setModuleQueue(browserId, modules, loadBalance, replayExecution) {\n    const replayExecutionMap = this.stateManager.getReplayExecutionMap();\n\n    if (replayExecution) {\n      if (!replayExecutionMap) {\n        throw new Error('No replay execution map was set on the stateManager.');\n      } else if (!this.stateManager.getReplayExecutionModuleQueue(browserId)) {\n        // Only set the moduleQueue once, ignore repeated requests\n        this.stateManager.setReplayExecutionModuleQueue(\n          replayExecutionMap.get(browserId),\n          browserId,\n        );\n      }\n    } else if (loadBalance && !this.stateManager.getTestModuleQueue()) {\n      // Only set the moduleQueue once, ignore repeated requests\n      this.stateManager.setTestModuleQueue(modules);\n    }\n  }\n\n  /**\n   * Gets the next test module from the moduleQueue and emit back to the browser.\n   * If moduleQueue is already empty, emit the module-queue-complete event,\n   * signaling no more test module to run.\n   *\n   * @param {number} browserId\n   * @param {Object} socket\n   * @param {boolean} loadBalance\n   * @param {boolean} writeExecutionFile\n   */\n  nextModuleResponse(browserId, socket, writeExecutionFile) {\n    const moduleQueue =\n    
  this.stateManager.getTestModuleQueue() ||\n      this.stateManager.getReplayExecutionModuleQueue(browserId);\n    if (!moduleQueue) {\n      throw new Error('No moduleQueue was set.');\n    }\n\n    const moduleName = moduleQueue.shift();\n    socket.emit('testem:next-module-response', {\n      done: !moduleQueue.length && !moduleName,\n      value: moduleName,\n    });\n\n    // Keep track of the modules executed per browserId when running test suite with load-balance.\n    // In replay-execution mode, we are already running a predefined set of modules, so no need\n    // to save this again\n    if (moduleName && writeExecutionFile) {\n      this.stateManager.addModuleNameToReplayExecutionMap(\n        moduleName,\n        browserId,\n      );\n    }\n  }\n\n  /**\n   * Record the launched browser id\n   *\n   * @param {number} browserId\n   */\n  recordStartedLauncherId(browserId) {\n    this.stateManager.addToStartedLaunchers(browserId);\n  }\n\n  /**\n   * Record the module run details to the stateManager\n   *\n   * @param {Object} metaData\n   */\n  recordModuleMetadata(metaData) {\n    this.stateManager.addToModuleMetadata(metaData);\n  }\n\n  /**\n   * Gets browser id of launcher and stores the browser id stateManager\n   *\n   * @param {Object} launcher\n   * @param {Object} ui\n   */\n  recordFailedBrowserId(launcher, ui) {\n    let browserId;\n    try {\n      browserId = getBrowserId(launcher);\n    } catch (err) {\n      ui.writeLine(err.message);\n    }\n    if (\n      (browserId !== null || typeof browserId !== 'undefined') &&\n      !this.stateManager.containsFailedBrowser(browserId)\n    ) {\n      this.stateManager.addFailedBrowsers(browserId);\n    }\n  }\n\n  /**\n   * Generates an object for test execution\n   *\n   * @param {number} browserCount\n   */\n  _generatesModuleMapJsonObject(browserCount) {\n    return {\n      numberOfBrowsers: browserCount,\n      failedBrowsers: this.stateManager.getFailedBrowsers(),\n      executionMapping: 
(() => {\n        let executionMapping = Object.create(null);\n        for (const [\n          browserId,\n          moduleList,\n        ] of this.stateManager.getModuleMap()) {\n          executionMapping[browserId] = moduleList;\n        }\n\n        return executionMapping;\n      })(),\n    };\n  }\n\n  /**\n   * Keep track of the number of browsers that completed executing its tests.\n   * When all browsers complete, write test-execution.json to disk and clean up the stateManager\n   *\n   * @param {number} browserCount\n   * @param {number} launcherId\n   * @param {Object} ui\n   * @param {Object} commands\n   * @param {Object} currentDate\n   */\n  completedBrowsersHandler(\n    browserCount,\n    launcherId,\n    ui,\n    commands,\n    currentDate,\n  ) {\n    const browsersStarted = this.stateManager.getStartedLaunchers();\n    let browsersCompleted = false;\n\n    this.stateManager.incrementCompletedBrowsers(launcherId);\n    const completedBrowser = this.stateManager.getCompletedBrowser();\n    if (completedBrowser === browsersStarted.size) {\n      if (commands.get('writeModuleMetadataFile')) {\n        const moduleDetailFileName = path.join(\n          this.root,\n          `module-metadata-${currentDate}.json`,\n        );\n        const sortedModuleMetadata = getSortedModuleMetaData(\n          this.stateManager.getModuleMetadata(),\n        );\n\n        writeJsonToFile(\n          moduleDetailFileName,\n          {\n            requested: `${browserCount} browser(s)`,\n            launched: `${browsersStarted.size} browser(s)`,\n            modules: Array.from(sortedModuleMetadata.values()),\n          },\n          { spaces: 2 },\n        );\n        ui.writeLine(\n          `\\nExecution module details were recorded at ${moduleDetailFileName}`,\n        );\n      }\n\n      if (commands.get('writeExecutionFile') && commands.get('loadBalance')) {\n        const moduleMapJson = this._generatesModuleMapJsonObject(browserCount);\n        const 
testExecutionPath = path.join(\n          this.root,\n          `test-execution-${currentDate}.json`,\n        );\n\n        writeJsonToFile(testExecutionPath, moduleMapJson, { spaces: 2 });\n        ui.writeLine(`\\nExecution was recorded at ${testExecutionPath}`);\n      }\n\n      ui.writeLine(\n        `Out of requested ${browserCount} browser(s), ${browsersStarted.size} browser(s) was launched & completed.`,\n      );\n\n      if (browserCount !== browsersStarted.size) {\n        ui.writeLine('Waiting for remaining browsers to exited.');\n      }\n    }\n\n    if (completedBrowser === browserCount) {\n      ui.writeLine('All browsers to exited.');\n      // --server mode allows rerun of tests by refreshing the browser\n      // replayExecutionMap should be reused so the test-execution json\n      // does not need to be reread\n      const replayExecutionMap = this.stateManager.getReplayExecutionMap();\n      this.stateManager = new ExecutionStateManager(replayExecutionMap);\n      browsersCompleted = true;\n    }\n    return browsersCompleted;\n  }\n}\n\nmodule.exports = TestemEvents;\n"
  },
  {
    "path": "lib/utils/tests-options-validator.js",
    "content": "'use strict';\n\nconst fs = require('fs-extra');\nconst SilentError = require('silent-error');\nconst semver = require('semver');\n\n/**\n * Validates the specified partitions\n *\n * @private\n * @param {Array<Number>} partitions\n * @param {Number} split\n */\nfunction validatePartitions(partitions, split) {\n  validatePartitionSplit(partitions, split);\n  validateElementsUnique(partitions, 'partition');\n}\n\n/**\n * Returns thr number of browsers defined within the test execution file.\n *\n * @param {*} fileName\n */\nfunction getNumberOfBrowser(fileName) {\n  const executionJson = fs.readJsonSync(fileName);\n  return executionJson.numberOfBrowsers;\n}\n\n/**\n * Validates the specified replay-browser\n *\n * @param {String} replayExecution\n * @param {Array<Number>} replayBrowser\n */\nfunction validateReplayBrowser(replayExecution, replayBrowser) {\n  if (!replayExecution) {\n    throw new SilentError(\n      'EmberExam: You must specify replay-execution when using the replay-browser option.',\n    );\n  }\n\n  const numberOfBrowsers = getNumberOfBrowser(replayExecution);\n\n  for (const i in replayBrowser) {\n    const browserId = replayBrowser[i];\n    if (browserId < 1) {\n      throw new SilentError(\n        'EmberExam: You must specify replay-browser values greater than or equal to 1.',\n      );\n    }\n    if (browserId > numberOfBrowsers) {\n      throw new SilentError(\n        'EmberExam: You must specify replayBrowser value smaller than a number of browsers in the specified json file.',\n      );\n    }\n  }\n\n  validateElementsUnique(replayBrowser, 'replayBrowser');\n}\n\n/**\n * Determines if the specified partitions value makes sense for a given split.\n *\n * @private\n * @param {Array<Number>} partitions\n * @param {Number} split\n */\nfunction validatePartitionSplit(partitions, split) {\n  if (!split) {\n    throw new SilentError(\n      'EmberExam: You must specify a `split` value in order to use `partition`.',\n    );\n  
}\n\n  for (let i = 0; i < partitions.length; i++) {\n    const partition = partitions[i];\n    if (partition < 1) {\n      throw new SilentError(\n        'EmberExam: Split tests are one-indexed, so you must specify partition values greater than or equal to 1.',\n      );\n    }\n    if (partition > split) {\n      throw new SilentError(\n        'EmberExam: You must specify `partition` values that are less than or equal to your `split` value.',\n      );\n    }\n  }\n}\n\n/**\n * Ensures that there is no value duplicated in a given array.\n *\n * @private\n * @param {Array} arr\n * @param {String} typeOfValue\n */\nfunction validateElementsUnique(arr, typeOfValue) {\n  const sorted = arr.slice().sort();\n  for (let i = 0; i < sorted.length - 1; i++) {\n    if (sorted[i] === sorted[i + 1]) {\n      const errorMsg = `EmberExam: You cannot specify the same ${typeOfValue} value twice. ${sorted[i]} is repeated.`;\n      throw new SilentError(errorMsg);\n    }\n  }\n}\n\n/**\n * Performs logic related to validating command options for testing and\n * determining which functions to run on the tests.\n *\n * @class TestsOptionsValidator\n */\nmodule.exports = class TestsOptionsValidator {\n  constructor(options, emberCliVersion) {\n    this.options = options;\n    this.emberCliVersion = emberCliVersion;\n  }\n\n  /**\n   * Validates the command and returns a map of the options and whether they are enabled or not.\n   *\n   * @public\n   * @return {Object} Map of the options and whether they are enabled or not.\n   */\n  validateCommands() {\n    const validatedOptions = new Map();\n    if (this.options.writeModuleMetadataFile) {\n      validatedOptions.set('writeModuleMetadataFile', true);\n    }\n\n    if (this.options.split || this.options.partition) {\n      validatedOptions.set('split', this.validateSplit());\n    }\n\n    // The parallel option accepts a number, which can be 0\n    if (typeof this.options.parallel !== 'undefined') {\n      
validatedOptions.set('parallel', this.validateParallel());\n    }\n\n    // As random option can be an empty string it should check a type of random option rather than the option is not empty.\n    if (typeof this.options.random !== 'undefined') {\n      validatedOptions.set('random', this.validateRandom());\n    }\n\n    if (typeof this.options.writeExecutionFile !== 'undefined') {\n      validatedOptions.set(\n        'writeExecutionFile',\n        this.validateWriteExecutionFile(),\n      );\n    }\n\n    if (this.options.loadBalance) {\n      validatedOptions.set('loadBalance', this.validateLoadBalance());\n    }\n\n    if (this.options.replayExecution || this.options.replayBrowser) {\n      validatedOptions.set('replayExecution', this.validateReplayExecution());\n    }\n\n    return validatedOptions;\n  }\n\n  /**\n   * Determines if we should split the tests file and validates associated options\n   * (`split`, `partition`).\n   *\n   * @return {boolean}\n   */\n  validateSplit() {\n    const options = this.options;\n    let split = options.split;\n\n    if (typeof split !== 'undefined' && split < 2) {\n      console.warn(\n        'You should specify a number of files greater than 1 to split your tests across. 
Defaulting to 1 split which is the same as not using `split`.',\n      );\n      split = 1;\n    }\n\n    if (\n      typeof split !== 'undefined' &&\n      typeof this.options.replayBrowser !== 'undefined'\n    ) {\n      throw new SilentError(\n        'EmberExam: You must not use the `replay-browser` option with the `split` option.',\n      );\n    }\n\n    if (typeof split !== 'undefined' && this.options.replayExecution) {\n      throw new SilentError(\n        'EmberExam: You must not use the `replay-execution` option with the `split` option.',\n      );\n    }\n\n    const partitions = options.partition;\n\n    if (typeof partitions !== 'undefined') {\n      validatePartitions(partitions, split);\n    }\n\n    return !!split;\n  }\n\n  /**\n   * Determines if we should randomize the tests and validates associated options\n   * (`random`).\n   *\n   * @return {boolean}\n   */\n  validateRandom() {\n    return typeof this.options.random === 'string';\n  }\n\n  /**\n   * Determines if a test execution json file should be written after running a test suite and validates associated\n   *\n   *  @return {boolean}\n   */\n  validateWriteExecutionFile() {\n    if (!this.options.writeExecutionFile) {\n      return false;\n    }\n\n    if (!this.options.loadBalance) {\n      throw new SilentError(\n        'EmberExam: You must run test suite with the `load-balance` option in order to use the `write-execution-file` option.',\n      );\n    } else if (this.options.launch === 'false') {\n      //When `--no-launch` option is passed, a value for launch in testem config is set to be string false.\n      throw new SilentError(\n        'EmberExam: You must not use no-launch with write-execution-file option.',\n      );\n    }\n\n    return true;\n  }\n\n  /**\n   * Determines if we should run split tests in parallel and validates associated\n   * options (`parallel`).\n   *\n   * @return {boolean}\n   */\n  validateParallel() {\n    const parallelValue = 
parseInt(this.options.parallel, 10);\n\n    if (isNaN(parallelValue)) {\n      throw new SilentError(\n        `EmberExam: You must specify a Numeric value to 'parallel'. Value passed: ${this.options.parallel}`,\n      );\n    }\n    this.options.parallel = parallelValue;\n\n    if (typeof this.options.replayBrowser !== 'undefined') {\n      throw new SilentError(\n        'EmberExam: You must not use the `replay-browser` option with the `parallel` option.',\n      );\n    }\n\n    if (this.options.replayExecution) {\n      throw new SilentError(\n        'EmberExam: You must not use the `replay-execution` option with the `parallel` option.',\n      );\n    }\n\n    if (!this.options.loadBalance) {\n      if (!this.options.split) {\n        throw new SilentError(\n          'EmberExam: You must specify the `split` option in order to run your tests in parallel.',\n        );\n      } else if (this.options.parallel !== 1) {\n        throw new SilentError(\n          'EmberExam: When used with `split` or `partition`, `parallel` does not accept a value other than 1.',\n        );\n      }\n    }\n\n    if (this.options.parallel < 1) {\n      throw new SilentError(\n        'EmberExam: You must specify a value greater than 1 to `parallel`.',\n      );\n    }\n\n    return true;\n  }\n\n  /**\n   * Determines if we should run tests in load balance mode.\n   * options (`load-balance`).\n   *\n   * @return {boolean}\n   */\n  validateLoadBalance() {\n    // It's required to use ember-cli version 3.2.0 or greater to support the `load-balance` feature.\n    const emberCliVersionRange = semver.validRange(this.emberCliVersion);\n    if (semver.gtr('3.2.0', emberCliVersionRange)) {\n      throw new SilentError(\n        'EmberExam: You must be using ember-cli version ^3.2.0 for this feature to work properly.',\n      );\n    }\n\n    if (typeof this.options.replayBrowser !== 'undefined') {\n      throw new SilentError(\n        'EmberExam: You must not use the `replay-browser` 
option with the `load-balance` option.',\n      );\n    }\n\n    if (this.options.replayExecution) {\n      throw new SilentError(\n        'EmberExam: You must not use the `replay-execution` option with the `load-balance` option.',\n      );\n    }\n\n    //When `--no-launch` option is passed, a value for launch in testem config is set to be string false.\n    if (this.options.launch === 'false') {\n      throw new SilentError(\n        'EmberExam: You must not use `no-launch` option with the `load-balance` option.',\n      );\n    }\n\n    if (!this.options.parallel) {\n      throw new SilentError(\n        'EmberExam: You should specify the number of browsers to load-balance against using `parallel` when using `load-balance`.',\n      );\n    }\n\n    return true;\n  }\n\n  /**\n   * Determines if we should replay execution for reproduction.\n   * options (`replay-execution`).\n   *\n   * @return {boolean}\n   */\n  validateReplayExecution() {\n    const replayBrowser = this.options.replayBrowser;\n    const replayExecution = this.options.replayExecution;\n\n    if (!replayExecution) {\n      return false;\n    }\n\n    if (this.options.launch === 'false') {\n      throw new SilentError(\n        'EmberExam: You must not use `no-launch` option with the `replay-execution` option.',\n      );\n    }\n\n    if (replayBrowser) {\n      validateReplayBrowser(replayExecution, replayBrowser, this.options);\n    }\n\n    return true;\n  }\n};\n"
  },
  {
    "path": "node-tests/.eslintrc",
    "content": "{\n  \"env\": {\n    \"mocha\": true\n  },\n  \"rules\": {\n    \"no-var\": 0\n  }\n}\n"
  },
  {
    "path": "node-tests/acceptance/exam/vite/vite-test.js",
    "content": "const path = require('path');\nconst assert = require('assert');\n\nconst { rimrafSync } = require('rimraf');\nconst { ROOT, execa, getNumberOfTests } = require('../../helpers.js');\n\nconst DIR = path.resolve(ROOT, 'test-apps/vite-with-compat');\nconst TEST_OUTPUT_DIR = 'dist';\n\ndescribe('Command | exam | vite', function () {\n  this.timeout(300000);\n\n  before(function () {\n    // Cleanup any previous runs\n    rimrafSync(TEST_OUTPUT_DIR);\n\n    // Build the app\n    return execa('pnpm', ['build:tests', '--outDir', TEST_OUTPUT_DIR], {\n      cwd: DIR,\n    });\n  });\n\n  after(function () {\n    rimrafSync(TEST_OUTPUT_DIR);\n  });\n\n  describe('without exam', function () {\n    it('has passing tests with just testem', async function () {\n      let result = await execa('testem', ['ci'], {\n        cwd: DIR,\n        env: {\n          TESTEM_DIR: TEST_OUTPUT_DIR,\n        },\n      });\n\n      assert.strictEqual(getNumberOfTests(result.stdout), 6);\n      assert.strictEqual(result.stdout.includes('Suite A'), true);\n      assert.strictEqual(result.stdout.includes('Suite B'), true);\n    });\n  });\n\n  describe('split', function () {\n    describe('parallel', function () {\n      it('has no shared tests between partitions', async function () {\n        let resultA = await execa(\n          'ember',\n          [\n            'exam',\n            '--test-port',\n            '1337',\n            '--split',\n            '2',\n            '--partition',\n            '1',\n            '--path',\n            TEST_OUTPUT_DIR,\n            '--parallel',\n          ],\n          { cwd: DIR },\n        );\n\n        let resultB = await execa(\n          'ember',\n          [\n            'exam',\n            '--test-port',\n            '1338',\n            '--split',\n            '2',\n            '--partition',\n            '2',\n            '--path',\n            TEST_OUTPUT_DIR,\n            '--parallel',\n          ],\n          { cwd: DIR },\n    
    );\n\n        assert.strictEqual(getNumberOfTests(resultA.stdout), 3);\n        assert.strictEqual(resultA.stdout.includes('Suite A'), true);\n        assert.strictEqual(resultA.stdout.includes('Suite B'), false);\n\n        assert.strictEqual(getNumberOfTests(resultB.stdout), 3);\n        assert.strictEqual(resultB.stdout.includes('Suite B'), true);\n        assert.strictEqual(resultB.stdout.includes('Suite A'), false);\n      });\n    });\n  });\n\n  describe('loadBalance', function () {\n    it('has no shared tests between partitions', async function () {\n      let result = await execa(\n        'ember',\n        [\n          'exam',\n          '--test-port',\n          '1339',\n          '--load-balance',\n          '--path',\n          TEST_OUTPUT_DIR,\n          '--parallel',\n          '2',\n        ],\n        { cwd: DIR },\n      );\n\n      assert.strictEqual(getNumberOfTests(result.stdout), 6);\n      assert.strictEqual(result.stdout.includes('Suite A'), true);\n      assert.strictEqual(result.stdout.includes('Suite B'), true);\n    });\n  });\n});\n"
  },
  {
    "path": "node-tests/acceptance/exam-iterate-test.js",
    "content": "'use strict';\n\nconst assert = require('assert');\nconst { rimrafSync } = require('rimraf');\nconst fs = require('fs-extra');\nconst path = require('path');\n\nfunction assertExpectRejection() {\n  assert.ok(false, 'Expected promise to reject, but it fulfilled');\n}\n\nasync function execa(command, args) {\n  const { execa: originalExeca } = await import('execa');\n  return originalExeca(command, args);\n}\n\ndescribe('Acceptance | Exam Iterate Command', function () {\n  this.timeout(300000);\n\n  it('should build the app, test it a number of times, and clean it up', function () {\n    return execa('ember', ['exam:iterate', '2']).then((child) => {\n      const stdout = child.stdout;\n      assert.ok(\n        stdout.includes('Building app for test iterations.'),\n        'Logged building message from command',\n      );\n      assert.ok(\n        stdout.includes('Built project successfully.'),\n        'Built successfully according to Ember-CLI',\n      );\n\n      assert.ok(\n        stdout.includes('Running iteration #1.'),\n        'Logs first iteration',\n      );\n      assert.ok(\n        stdout.includes('Running iteration #2.'),\n        'Logs second iteration',\n      );\n\n      const seedRE = /Randomizing tests with seed: (.*)/g;\n\n      const firstSeed = seedRE.exec(stdout)[1];\n      const secondSeed = seedRE.exec(stdout)[1];\n\n      assert.ok(firstSeed, 'first seed exists');\n      assert.ok(secondSeed, 'second seed exists');\n      assert.notEqual(\n        firstSeed,\n        secondSeed,\n        'the first and second seeds are not the same',\n      );\n\n      assert.ok(\n        stdout.includes('Cleaning up test iterations.'),\n        'Logged cleaning up message from command',\n      );\n      assert.throws(\n        () => fs.accessSync('iteration-dist', fs.F_OK),\n        'iteration-dist is cleaned up',\n      );\n    });\n  });\n\n  it('should test the app with additional options passed in and catch failure cases', function 
() {\n    const execution = execa('ember', [\n      'exam:iterate',\n      '2',\n      '--options',\n      '--parallel',\n    ]);\n    return execution.then(assertExpectRejection, (error) => {\n      const splitErrorRE =\n        /You must specify the `split` option in order to run your tests in parallel./g;\n\n      assert.ok(\n        splitErrorRE.test(error.stderr),\n        'expected stderr to contain the appropriate error message',\n      );\n      assert.strictEqual(error.exitCode, 1);\n      assert.strictEqual(error.failed, true);\n      assert.strictEqual(error.killed, false);\n    });\n  });\n\n  describe('building', function () {\n    const buildDir = path.join(process.cwd(), 'dist');\n\n    afterEach(() => rimrafSync(buildDir));\n\n    it('should not build the app or clean it up, but use an existing build to test', function () {\n      return execa('ember', ['build']).then(() => {\n        execa('ember', ['exam:iterate', '2', '--path', 'dist']).then(\n          (child) => {\n            const stdout = child.stdout;\n\n            assert.ok(\n              !stdout.includes('Building app for test iterations.'),\n              'No logged building message from command',\n            );\n            assert.ok(\n              !stdout.includes('Built project successfully.'),\n              'Not built successfully according to Ember-CLI',\n            );\n\n            assert.ok(\n              stdout.includes('Running iteration #1.'),\n              'Logs first iteration',\n            );\n            assert.ok(\n              stdout.includes('Running iteration #2.'),\n              'Logs second iteration',\n            );\n\n            const seedRE = /Randomizing tests with seed: (.*)/g;\n\n            const firstSeed = seedRE.exec(stdout)[1];\n            const secondSeed = seedRE.exec(stdout)[1];\n\n            assert.ok(firstSeed, 'first seed exists');\n            assert.ok(secondSeed, 'second seed exists');\n            assert.notEqual(\n              
firstSeed,\n              secondSeed,\n              'the first and second seeds are not the same',\n            );\n\n            assert.ok(\n              !stdout.includes('Cleaning up test iterations.'),\n              'No logged cleaning up message from command',\n            );\n            assert.throws(\n              () => fs.accessSync('iteration-dist', fs.F_OK),\n              'iteration-dist is non-existent',\n            );\n\n            assert.doesNotThrow(\n              () => fs.accessSync(buildDir, fs.F_OK),\n              'dist is not cleaned up',\n            );\n          },\n        );\n      });\n    });\n  });\n\n  describe('Exit Code', function () {\n    const destPath = path.join(\n      __dirname,\n      '..',\n      '..',\n      'tests',\n      'unit',\n      'failing-test.js',\n    );\n\n    beforeEach(function () {\n      const failingTestPath = path.join(\n        __dirname,\n        '..',\n        'fixtures',\n        'failure.js',\n      );\n      fs.copySync(failingTestPath, destPath);\n    });\n\n    afterEach(function () {\n      fs.removeSync(destPath);\n    });\n\n    it('should have an exitCode of 1 when a test fails', function () {\n      return execa('ember', ['exam:iterate', '1']).then(\n        assertExpectRejection,\n        (error) => {\n          assert.strictEqual(error.exitCode, 1);\n          assert.strictEqual(error.killed, false);\n        },\n      );\n    });\n  });\n});\n"
  },
  {
    "path": "node-tests/acceptance/exam-test.js",
    "content": "'use strict';\n\nconst assert = require('assert');\nconst fixturify = require('fixturify');\nconst fs = require('fs-extra');\nconst path = require('path');\nconst { rimrafSync } = require('rimraf');\nconst glob = require('glob');\nconst { execa, getNumberOfTests } = require('./helpers');\n\nfunction assertExpectRejection() {\n  assert.ok(false, 'Expected promise to reject, but it fulfilled');\n}\n\nconst TOTAL_NUM_TESTS = 67; // Total Number of tests without the global 'Ember.onerror validation tests'\n\nfunction getTotalNumberOfTests(output) {\n  // In ember-qunit 3.4.0, this new check was added: https://github.com/emberjs/ember-qunit/commit/a7e93c4b4b535dae62fed992b46c00b62bfc83f4\n  // which adds this Ember.onerror validation test.\n  // As Ember.onerror validation test is added per browser the total number of tests executed should be the sum of TOTAL_NUM_TESTS defined and a number of browsers.\n  const emberOnerror = output.match(\n    /ember-qunit: Ember.onerror validation: Ember.onerror is functioning properly/g,\n  );\n  return TOTAL_NUM_TESTS + (emberOnerror ? 
emberOnerror.length : 0);\n}\n\ndescribe('Acceptance | Exam Command', function () {\n  this.timeout(300000);\n\n  before(function () {\n    // Cleanup any previous runs\n    rimrafSync('acceptance-dist');\n\n    // Build the app\n    return execa('ember', ['build', '--output-path', 'acceptance-dist']);\n  });\n\n  after(function () {\n    rimrafSync('acceptance-dist');\n  });\n\n  function assertOutput(output, text, good, bad) {\n    good.forEach(function (partition) {\n      assert.ok(\n        output.includes(`${text} ${partition} `),\n        `output has ${text} ${partition}`,\n      );\n    });\n\n    (bad || []).forEach(function (partition) {\n      assert.ok(\n        !output.includes(`${text} ${partition} `),\n        `output does not have ${text} ${partition}`,\n      );\n    });\n  }\n\n  function assertAllPartitions(output) {\n    assertOutput(output, 'Exam Partition', [1, 2, 3]);\n    assert.strictEqual(\n      getNumberOfTests(output),\n      getTotalNumberOfTests(output),\n      'ran all of the tests in the suite',\n    );\n  }\n\n  function assertSomePartitions(output, good, bad) {\n    assertOutput(output, 'Exam Partition', good, bad);\n    assert.ok(\n      getNumberOfTests(output) < getTotalNumberOfTests(output),\n      'did not run all of the tests in the suite',\n    );\n  }\n\n  it('runs all tests normally', function () {\n    return execa('ember', ['exam', '--path', 'acceptance-dist']).then(\n      (child) => {\n        const stdout = child.stdout;\n        assert.ok(\n          !stdout.includes('Exam Partition'),\n          'does not add any sort of partition info',\n        );\n        assert.strictEqual(\n          getNumberOfTests(stdout),\n          getTotalNumberOfTests(stdout),\n          'ran all of the tests in the suite',\n        );\n      },\n    );\n  });\n\n  describe('Execute tests with load() in test-helper', function () {\n    const originalTestHelperPath = path.join(\n      __dirname,\n      '..',\n      '..',\n      
'tests',\n      'test-helper.js',\n    );\n\n    const renamedOriginalTestHelperPath = path.join(\n      __dirname,\n      '..',\n      '..',\n      'tests',\n      'test-helper-orig.js',\n    );\n\n    const testHelperWithLoadPath = path.join(\n      __dirname,\n      '..',\n      'fixtures',\n      'test-helper-with-load.js',\n    );\n    before(function () {\n      // Use test-helper-with-load.js as the test-helper.js file\n      fs.renameSync(originalTestHelperPath, renamedOriginalTestHelperPath);\n      fs.copySync(testHelperWithLoadPath, originalTestHelperPath);\n\n      // Build the app\n      return execa('ember', [\n        'build',\n        '--output-path',\n        'acceptance-with-load-dist',\n      ]);\n    });\n\n    after(function () {\n      rimrafSync('acceptance-with-load-dist');\n\n      // restore the original test-helper.js file\n      fs.unlinkSync(originalTestHelperPath);\n      fs.renameSync(renamedOriginalTestHelperPath, originalTestHelperPath);\n    });\n\n    it('runs all tests normally', function () {\n      return execa('ember', [\n        'exam',\n        '--path',\n        'acceptance-with-load-dist',\n      ]).then((child) => {\n        const stdout = child.stdout;\n        assert.ok(\n          !stdout.includes('Exam Partition'),\n          'does not add any sort of partition info',\n        );\n        assert.strictEqual(\n          getNumberOfTests(stdout),\n          getTotalNumberOfTests(stdout),\n          'ran all of the tests in the suite',\n        );\n      });\n    });\n  });\n\n  describe('Split', function () {\n    it('splits the test suite but only runs the first partition', function () {\n      return execa('ember', [\n        'exam',\n        '--split',\n        '3',\n        '--path',\n        'acceptance-dist',\n      ]).then((child) => {\n        assertSomePartitions(child.stdout, [1], [2, 3]);\n      });\n    });\n\n    describe('Partition', function () {\n      it('splits the test suite and runs a specified 
partition', function () {\n        return execa('ember', [\n          'exam',\n          '--split',\n          '3',\n          '--partition',\n          '2',\n          '--path',\n          'acceptance-dist',\n        ]).then((child) => {\n          assertSomePartitions(child.stdout, [2], [1, 3]);\n        });\n      });\n\n      it('splits the test suite and runs multiple specified partitions', function () {\n        return execa('ember', [\n          'exam',\n          '--split',\n          '3',\n          '--partition',\n          '1,3',\n          '--path',\n          'acceptance-dist',\n        ]).then((child) => {\n          assertSomePartitions(child.stdout, ['1,3'], [1, 2, 3]);\n        });\n      });\n\n      it('errors when running an invalid partition', function () {\n        return execa('ember', [\n          'exam',\n          '--split',\n          '3',\n          '--partition',\n          '4',\n          '--path',\n          'acceptance-dist',\n        ]).then(assertExpectRejection, (error) => {\n          assert.ok(\n            error.stderr.includes(\n              'You must specify `partition` values that are less than or equal to your `split` value.',\n            ),\n          );\n        });\n      });\n\n      it('errors when specifying a partition but no split count', function () {\n        return execa('ember', [\n          'exam',\n          '--partition',\n          '2',\n          '--path',\n          'acceptance-dist',\n        ]).then(assertExpectRejection, (error) => {\n          assert.ok(\n            error.stderr.includes(\n              'You must specify a `split` value in order to use `partition`.',\n            ),\n          );\n        });\n      });\n    });\n\n    describe('Parallel', function () {\n      it('runs multiple partitions in parallel', function () {\n        return execa('ember', [\n          'exam',\n          '--path',\n          'acceptance-dist',\n          '--split',\n          '3',\n          '--parallel',\n   
     ]).then((child) => {\n          assertAllPartitions(child.stdout);\n        });\n      });\n\n      it('runs multiple specified partitions in parallel', function () {\n        return execa('ember', [\n          'exam',\n          '--split',\n          '3',\n          '--partition',\n          '1,3',\n          '--path',\n          'acceptance-dist',\n          '--parallel',\n        ]).then((child) => {\n          assertSomePartitions(child.stdout, [1, 3], [2]);\n        });\n      });\n    });\n  });\n\n  describe('Random', function () {\n    it('runs tests with the passed in seeds', function () {\n      return execa('ember', [\n        'exam',\n        '--random',\n        '1337',\n        '--path',\n        'acceptance-dist',\n      ]).then((child) => {\n        const stdout = child.stdout;\n        assert.ok(\n          stdout.includes('Randomizing tests with seed: 1337'),\n          'logged the seed value',\n        );\n        assert.strictEqual(\n          getNumberOfTests(stdout),\n          getTotalNumberOfTests(stdout),\n          'ran all of the tests in the suite',\n        );\n      });\n    });\n  });\n\n  describe('Load Balance', function () {\n    const unlinkFiles = [];\n\n    function assertTestExecutionFailedBrowsers(output, numberOfFailedBrowsers) {\n      const testExecutionPath = path.join(\n        process.cwd(),\n        output.match(/test-execution-([0-9]*).json/g)[0],\n      );\n      unlinkFiles.push(testExecutionPath);\n\n      assert.ok(\n        fs.existsSync(testExecutionPath),\n        'test execution json written to root',\n      );\n\n      const testExecutionFile = fs.readJsonSync(testExecutionPath);\n\n      assert.strictEqual(\n        testExecutionFile.failedBrowsers.length,\n        numberOfFailedBrowsers,\n        'failed browsers array is correctly recorded',\n      );\n    }\n\n    function assertModuleDetailJson(output) {\n      let moduleRunDetailJsonPath = path.join(\n        process.cwd(),\n        
output.match(/module-metadata-([0-9]*).json/g)[0],\n      );\n      unlinkFiles.push(moduleRunDetailJsonPath);\n      assert.ok(\n        fs.existsSync(moduleRunDetailJsonPath),\n        'module run detail json written to root',\n      );\n    }\n\n    afterEach(() => {\n      unlinkFiles.forEach((path) => {\n        fs.unlinkSync(path);\n      });\n\n      unlinkFiles.length = 0;\n    });\n\n    it('errors if `--parallel` is not passed', function () {\n      return execa('ember', [\n        'exam',\n        '--path',\n        'acceptance-dist',\n        '--load-balance',\n      ]).then(assertExpectRejection, (error) => {\n        assert.ok(\n          error.stderr.includes(\n            'You should specify the number of browsers to load-balance against using `parallel` when using `load-balance`.',\n          ),\n        );\n      });\n    });\n\n    it('load balances the test suite with one browser', function () {\n      return execa('ember', [\n        'exam',\n        '--path',\n        'acceptance-dist',\n        '--write-execution-file',\n        '--load-balance',\n        '--parallel',\n      ]).then((child) => {\n        const output = child.stdout;\n\n        assertTestExecutionFailedBrowsers(output, 0);\n        assertOutput(output, 'Browser Id', [1]);\n        assert.strictEqual(\n          getNumberOfTests(output),\n          getTotalNumberOfTests(output),\n          'ran all of the tests in the suite',\n        );\n      });\n    });\n\n    it('should write module detail json after execution with `write-module-metadata-file`.', function () {\n      return execa('ember', [\n        'exam',\n        '--path',\n        'acceptance-dist',\n        '--load-balance',\n        '--write-module-metadata-file',\n        '--parallel',\n      ]).then((child) => {\n        const output = child.stdout;\n        assertModuleDetailJson(output);\n      });\n    });\n\n    it('load balances the test suite with 3 browsers', function () {\n      return execa('ember', [\n   
     'exam',\n        '--path',\n        'acceptance-dist',\n        '--load-balance',\n        '--parallel',\n        '3',\n        '--write-execution-file',\n      ]).then((child) => {\n        const output = child.stdout;\n\n        assertTestExecutionFailedBrowsers(output, 0);\n        assertOutput(output, 'Browser Id', [1, 2, 3]);\n        assert.strictEqual(\n          getNumberOfTests(output),\n          getTotalNumberOfTests(output),\n          'ran all of the tests in the suite',\n        );\n      });\n    });\n\n    it(\"load balances partition 1's test suite with 3 browsers\", function () {\n      return execa('ember', [\n        'exam',\n        '--path',\n        'acceptance-dist',\n        '--load-balance',\n        '--split',\n        '2',\n        '--partition',\n        '1',\n        '--parallel',\n        '3',\n        '--write-execution-file',\n      ]).then((child) => {\n        const output = child.stdout;\n\n        assertTestExecutionFailedBrowsers(output, 0);\n        assertOutput(output, 'Exam Partition', [1], [2]);\n        assertOutput(output, 'Browser Id', [1, 2, 3]);\n        assert.ok(\n          getNumberOfTests(output) < getTotalNumberOfTests(output),\n          'did not run all of the tests in the suite',\n        );\n      });\n    });\n\n    describe('Failure Cases', function () {\n      const destPath = path.join(\n        __dirname,\n        '..',\n        '..',\n        'tests',\n        'unit',\n        'browser-exit-test.js',\n      );\n      beforeEach(function () {\n        const failingTestPath = path.join(\n          __dirname,\n          '..',\n          'fixtures',\n          'browser-exit.js',\n        );\n        fs.copySync(failingTestPath, destPath);\n        return execa('ember', ['build', '--output-path', 'failure-dist']);\n      });\n\n      afterEach(function () {\n        rimrafSync('failure-dist');\n        fs.removeSync(destPath);\n      });\n\n      it('should write test-execution json when browser exits', 
function () {\n        return execa('ember', [\n          'exam',\n          '--path',\n          'failure-dist',\n          '--load-balance',\n          '--parallel',\n          '3',\n          '--write-execution-file',\n        ]).then(assertExpectRejection, (error) => {\n          const output = error.stdout;\n          assert.ok(\n            output.includes(\n              'Error: Browser exited on request from test driver',\n            ),\n            `browser exited during the test execution:\\n${output}`,\n          );\n          assertTestExecutionFailedBrowsers(output, 1);\n        });\n      });\n\n      it('should write module metadata json when browser exits', function () {\n        return execa('ember', [\n          'exam',\n          '--path',\n          'failure-dist',\n          '--load-balance',\n          '--parallel',\n          '2',\n          '--write-module-metadata-file',\n        ]).then(assertExpectRejection, (error) => {\n          const output = error.stdout;\n          assert.ok(\n            output.includes(\n              'Error: Browser exited on request from test driver',\n            ),\n            `browser exited during the test execution:\\n${output}`,\n          );\n          assertModuleDetailJson(output);\n        });\n      });\n    });\n  });\n\n  describe('Replay Execution', function () {\n    let testExecutionJson = {};\n\n    beforeEach(() => {\n      testExecutionJson = {\n        numberOfBrowsers: 2,\n        failedBrowsers: [],\n        executionMapping: {\n          1: [\n            'dummy/tests/unit/test-loader-test',\n            'dummy/tests/unit/multiple-edge-cases-test',\n            'dummy/tests/unit/multiple-ember-tests-test',\n          ],\n          2: [\n            'dummy/tests/unit/multiple-tests-test',\n            'dummy/tests/unit/testem-output-test',\n            'dummy/tests/unit/weight-test-modules-test',\n            'dummy/tests/unit/async-iterator-test',\n            
'dummy/tests/unit/filter-test-modules-test',\n          ],\n        },\n      };\n    });\n\n    afterEach(() => {\n      fs.unlinkSync(path.join(process.cwd(), 'test-execution-123.json'));\n      glob.sync('module-metadata-*.json').forEach((file) => {\n        fs.unlinkSync(path.join(process.cwd(), file));\n      });\n    });\n\n    it('replay only the failed browsers defined in failedBrowsers array', function () {\n      testExecutionJson.failedBrowsers.push('1');\n      fixturify.writeSync(process.cwd(), {\n        'test-execution-123.json': JSON.stringify(testExecutionJson),\n      });\n\n      return execa('ember', [\n        'exam',\n        '--replay-execution',\n        'test-execution-123.json',\n        '--path',\n        'acceptance-dist',\n      ]).then((child) => {\n        const output = child.stdout;\n        assert.strictEqual(\n          output.match(/test-execution-([0-9]*).json/g),\n          null,\n          'no test execution json should be written',\n        );\n\n        assertOutput(output, 'Browser Id', [1]);\n        assert.strictEqual(\n          getNumberOfTests(output),\n          25,\n          'ran all of the tests for browser one',\n        );\n      });\n    });\n\n    it('replay the full execution if failedBrowsers is empty', function () {\n      fixturify.writeSync(process.cwd(), {\n        'test-execution-123.json': JSON.stringify(testExecutionJson),\n      });\n\n      return execa('ember', [\n        'exam',\n        '--replay-execution',\n        'test-execution-123.json',\n        '--path',\n        'acceptance-dist',\n      ]).then((child) => {\n        const output = child.stdout;\n        assert.strictEqual(\n          output.match(/test-execution-([0-9]*).json/g),\n          null,\n          'no test execution json should be written',\n        );\n\n        assertOutput(output, 'Browser Id', [1, 2]);\n        assert.strictEqual(\n          getNumberOfTests(output),\n          getTotalNumberOfTests(output),\n          'ran 
all of the tests in the suite',\n        );\n      });\n    });\n\n    it('replay only the specified execution by --replay-browser', function () {\n      fixturify.writeSync(process.cwd(), {\n        'test-execution-123.json': JSON.stringify(testExecutionJson),\n      });\n\n      return execa('ember', [\n        'exam',\n        '--replay-execution',\n        'test-execution-123.json',\n        '--replay-browser',\n        '2',\n        '--path',\n        'acceptance-dist',\n      ]).then((child) => {\n        const output = child.stdout;\n        assert.strictEqual(\n          output.match(/test-execution-([0-9]*).json/g),\n          null,\n          'no test execution json should be written',\n        );\n\n        assertOutput(output, 'Browser Id', ['2']);\n        assert.strictEqual(\n          getNumberOfTests(output),\n          44,\n          'ran all of the tests for browser two',\n        );\n      });\n    });\n  });\n});\n"
  },
  {
    "path": "node-tests/acceptance/helpers.js",
    "content": "const path = require('path');\nconst fsExtra = require('fs-extra');\n\nasync function execa(command, args, options) {\n  const { execa: originalExeca } = await import('execa');\n  return originalExeca(command, args, options);\n}\n\nfunction getNumberOfTests(str) {\n  const match = str.match(/# tests ([0-9]+)/);\n  return match && parseInt(match[1], 10);\n}\n\nconst ROOT = path.resolve(__dirname, '../../');\nconst FIXTURE_DIR = path.resolve(ROOT, 'node-tests/fixtures');\n\nfunction applyFixture({ fixture, to }) {\n  fsExtra.copySync(path.join(FIXTURE_DIR, fixture), to);\n}\n\nmodule.exports = { execa, getNumberOfTests, applyFixture, ROOT };\n"
  },
  {
    "path": "node-tests/fixtures/browser-exit.js",
    "content": "import { module, test } from 'qunit';\n\nmodule('Module With Infinite Loop');\n\ntest('Infinite loop test #1', function (assert) {\n  assert.expect(1);\n  let condition = true;\n  while (condition) {\n    condition = condition || true;\n  }\n  assert.ok(true);\n});\n"
  },
  {
    "path": "node-tests/fixtures/failure.js",
    "content": "throw 'failure';\n"
  },
  {
    "path": "node-tests/fixtures/test-helper-with-load.js",
    "content": "import Application from 'dummy/app';\nimport config from 'dummy/config/environment';\nimport { setApplication } from '@ember/test-helpers';\nimport loadEmberExam from 'ember-exam/test-support/load';\nimport { start, setupEmberOnerrorValidation } from 'ember-qunit';\nimport { loadTests } from 'ember-qunit/test-loader';\n\nsetApplication(Application.create(config.APP));\nsetupEmberOnerrorValidation();\n\nloadEmberExam();\n\nloadTests();\nstart();\n"
  },
  {
    "path": "node-tests/fixtures/vite-eager-test-load.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <title>ViteWithCompat Tests</title>\n    <meta name=\"description\" content=\"\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n\n    {{content-for \"head\"}}\n    {{content-for \"test-head\"}}\n\n    <link rel=\"stylesheet\" href=\"/@embroider/virtual/vendor.css\">\n    <link rel=\"stylesheet\" href=\"/@embroider/virtual/test-support.css\">\n\n    {{content-for \"head-footer\"}}\n    {{content-for \"test-head-footer\"}}\n  </head>\n  <body>\n    {{content-for \"body\"}}\n    {{content-for \"test-body\"}}\n\n    <div id=\"qunit\"></div>\n    <div id=\"qunit-fixture\">\n      <div id=\"ember-testing-container\">\n        <div id=\"ember-testing\"></div>\n      </div>\n    </div>\n\n    <script src=\"/testem.js\" integrity=\"\" data-embroider-ignore></script>\n    <script src=\"/@embroider/virtual/vendor.js\"></script>\n    <script src=\"/@embroider/virtual/test-support.js\"></script>\n    <script type=\"module\">import \"ember-testing\";</script>\n\n    <script type=\"module\">\n      import { start } from './test-helper.js';\n\n      const availableModules = {\n        ...import.meta.glob(\"./**/*-test.{js,ts,gjs,gts}\", { eager: true })\n      };\n\n      start({ availableModules });\n    </script>\n\n    {{content-for \"body-footer\"}}\n  </body>\n</html>\n"
  },
  {
    "path": "node-tests/list.mjs",
    "content": "/**\n * This file is used by CI to list all the test files we have to generate a matrix to run.\n * Since our node-tests dirty the working directory, we don't want to have different test files stepping on each other.\n *\n * Additionally, this allows us to run each test file in parallel with the other test files.\n *\n * The code here was copied from https://github.com/embroider-build/try/blob/main/cli.js\n * (ish)\n */\nimport assert from \"node:assert\";\n// We run this file on node 24\n// eslint-disable-next-line n/no-unsupported-features/node-builtins\nimport { glob } from \"node:fs/promises\";\n\nlet files = [\n  {\n    name: \"Unit\",\n    command: \"pnpm mocha 'node-tests/unit/**/*-test.js'\",\n  },\n];\n\nfor await (const entry of glob(\n  \"node-tests/acceptance/**/*-test.{js,ts,mjs,cjs,mts,cts}\",\n)) {\n  let name = entry.replace(\"node-tests/acceptance/\", \"\");\n  files.push({\n    name,\n    include: {\n      name,\n      command: `pnpm mocha ${entry}`,\n    },\n  });\n}\n\nassert(\n  files.length > 0,\n  `There were no found test files -- this is unexpected`,\n);\n\nprocess.stdout.write(\n  JSON.stringify({\n    name: files.map((s) => s.name),\n    // always include an empty env by default, so that it's convenient to pass\n    // `${{ matrix.env }}` in github actions\n    include: files.map((s) => ({ env: {}, ...s.include })),\n  }),\n);\n"
  },
  {
    "path": "node-tests/unit/commands/exam-test.js",
    "content": "'use strict';\n\nconst assert = require('assert');\nconst MockProject = require('ember-cli/tests/helpers/mock-project');\nconst Task = require('ember-cli/lib/models/task');\nconst RSVP = require('rsvp');\nconst sinon = require('sinon');\n\nconst ExamCommand = require('../../../lib/commands/exam');\n\ndescribe('ExamCommand', function () {\n  function createCommand() {\n    const tasks = {\n      Build: Task.extend(),\n      Test: Task.extend(),\n    };\n\n    const project = new MockProject();\n\n    project.isEmberCLIProject = function () {\n      return true;\n    };\n    project.pkg = {\n      devDependencies: {\n        'ember-cli': '3.7.0',\n      },\n    };\n\n    return new ExamCommand({\n      project: project,\n      tasks: tasks,\n      ui: {\n        writeLine: function () {},\n      },\n    });\n  }\n\n  describe('run', function () {\n    let command;\n    let called;\n\n    beforeEach(function () {\n      command = createCommand();\n\n      called = {};\n\n      command.tasks.Test.prototype.run = function (options) {\n        called.testRun = true;\n        called.testRunOptions = options;\n        return RSVP.resolve();\n      };\n\n      command.tasks.Build.prototype.run = function () {\n        called.buildRun = true;\n        return RSVP.resolve();\n      };\n    });\n\n    it('should defer to super with normal build task', function () {\n      return command.run({}).then(function () {\n        assert.strictEqual(called.testRun, true);\n        assert.strictEqual(called.buildRun, true);\n      });\n    });\n\n    it('should set `modulePath` in the query option', function () {\n      return command.run({ modulePath: 'foo' }).then(function () {\n        assert.strictEqual(called.testRunOptions.query, 'modulePath=foo');\n      });\n    });\n\n    it('should set `partition` in the query option with one partition', function () {\n      return command.run({ split: 2, partition: [2] }).then(function () {\n        
assert.strictEqual(called.testRunOptions.query, 'split=2&partition=2');\n      });\n    });\n\n    it('should set `load-balance` in the query option', function () {\n      return command.run({ loadBalance: true, parallel: 1 }).then(function () {\n        assert.strictEqual(called.testRunOptions.query, 'loadBalance');\n      });\n    });\n\n    it('should set `preserve-test-name` in the query option', function () {\n      return command.run({ preserveTestName: true }).then(function () {\n        assert.strictEqual(called.testRunOptions.query, 'preserveTestName');\n      });\n    });\n\n    it('should set `partition` in the query option with multiple partitions', function () {\n      return command.run({ split: 2, partition: [1, 2] }).then(function () {\n        assert.strictEqual(\n          called.testRunOptions.query,\n          'split=2&partition=1&partition=2',\n        );\n      });\n    });\n\n    it('should append `partition` to the query option', function () {\n      return command\n        .run({ split: 2, partition: [2], query: 'someQuery=derp&hidepassed' })\n        .then(function () {\n          assert.strictEqual(\n            called.testRunOptions.query,\n            'someQuery=derp&hidepassed&split=2&partition=2',\n          );\n        });\n    });\n\n    it('should not append `partition` to the query option when parallelizing', function () {\n      return command\n        .run({ split: 2, partition: [1, 2], parallel: 1 })\n        .then(function () {\n          assert.strictEqual(called.testRunOptions.query, 'split=2');\n        });\n    });\n\n    it('should not append `partition` to the query option when not parallelizing without partitions', function () {\n      return command.run({ split: 2 }).then(function () {\n        assert.strictEqual(called.testRunOptions.query, 'split=2');\n      });\n    });\n\n    it('should set  `seed=1337` in the query option', function () {\n      return command.run({ random: '1337' }).then(function () {\n        
assert.strictEqual(called.testRunOptions.query, 'seed=1337');\n      });\n    });\n\n    it('should append `seed=1337` to the query option', function () {\n      return command\n        .run({ random: '1337', query: 'someQuery=derp&hidepassed' })\n        .then(function () {\n          assert.strictEqual(\n            called.testRunOptions.query,\n            'someQuery=derp&hidepassed&seed=1337',\n          );\n        });\n    });\n\n    it('should set `seed=random_seed` in the query option', function () {\n      const randomStub = sinon.stub(Math, 'random').returns('  random_seed');\n      return command.run({ random: '' }).then(function () {\n        assert.strictEqual(called.testRunOptions.query, 'seed=random_seed');\n        randomStub.restore();\n      });\n    });\n\n    it('should set split env var', function () {\n      return command.run({ split: 5 }).then(function () {\n        assert.strictEqual(process.env.EMBER_EXAM_SPLIT_COUNT, '5');\n      });\n    });\n  });\n});\n"
  },
  {
    "path": "node-tests/unit/utils/config-reader-test.js",
    "content": "'use strict';\n\nconst assert = require('assert');\nconst fixturify = require('fixturify');\nconst fs = require('fs-extra');\nconst path = require('path');\nconst readTestemConfig = require('../../../lib/utils/config-reader');\n\nconst fixturifyDir = 'tmp/fixture';\n\ndescribe('ConfigReader | readTestemConfig', function () {\n  beforeEach(function () {\n    fs.mkdirpSync(fixturifyDir);\n    this.fixturifyContent = {\n      foo: 'bar',\n    };\n  });\n\n  afterEach(function () {\n    fs.removeSync(fixturifyDir);\n  });\n\n  it('should find `testem.js` file by default and return `true` when no file name and no potential files specified', function () {\n    fixturify.writeSync(fixturifyDir, {});\n    assert.ok(readTestemConfig());\n  });\n\n  it(\"should return `false` if file doesn't exist when potential files are empty list\", function () {\n    assert.ok(!readTestemConfig('this-file-do-not-exsit.json', []));\n  });\n\n  it(\"should find `testem.js` file by default and return `true` when file specified doesn't exist\", function () {\n    assert.ok(readTestemConfig('this-file-do-not-exsit.json'));\n  });\n\n  it('should require a specified `js` file and return an object in the module when no potential files specified', function () {\n    assert.deepEqual(readTestemConfig('testem.simple-test-page.js').foo, 'bar');\n  });\n\n  it('should require a specified `js` file and return an object in the module when the file exists and potential files are empty list', function () {\n    assert.deepEqual(\n      readTestemConfig('testem.simple-test-page.js', []).foo,\n      'bar',\n    );\n  });\n\n  it('should read a specified `json` file and return an object read from the file', function () {\n    fixturify.writeSync(fixturifyDir, {\n      'testem.json-file.json': JSON.stringify(this.fixturifyContent),\n    });\n    assert.deepEqual(\n      readTestemConfig(path.join(fixturifyDir, 'testem.json-file.json'), [])\n        .foo,\n      'bar',\n    );\n  });\n\n  
it('should read a specified `yaml` file and return an object read from the file', function () {\n    fixturify.writeSync(fixturifyDir, {\n      'testem.yaml-file.yaml': JSON.stringify(this.fixturifyContent),\n    });\n    assert.deepEqual(\n      readTestemConfig(path.join(fixturifyDir, 'testem.yaml-file.yaml'), [])\n        .foo,\n      'bar',\n    );\n  });\n});\n"
  },
  {
    "path": "node-tests/unit/utils/execution-state-manager-test.js",
    "content": "'use strict';\n\nconst assert = require('assert');\nconst ExecutionStateManager = require('../../../lib/utils/execution-state-manager');\n\ndescribe('ExecutionStateManager', function () {\n  beforeEach(function () {\n    this.stateManager = new ExecutionStateManager();\n    this.moduleQueue = ['foo', 'bar', 'baz', 'boo', 'far', 'faz'];\n  });\n\n  describe('initializeStates', function () {\n    it('initialize states', function () {\n      assert.deepEqual(this.stateManager.getModuleMap().size, 0);\n      assert.deepEqual(this.stateManager.getTestModuleQueue(), null);\n      assert.deepEqual(this.stateManager.getReplayExecutionModuleQueue(), null);\n    });\n  });\n\n  describe('moduleQueue', function () {\n    it('is shared when no browserId passed to setModuleQueue', function () {\n      this.stateManager.setTestModuleQueue(this.moduleQueue);\n\n      assert.deepEqual(\n        this.stateManager.getTestModuleQueue(),\n        this.moduleQueue,\n        'the correct moduleQueue was returned',\n      );\n    });\n\n    it('returns the next module from the shared moduleQueue and state is preserved', function () {\n      this.stateManager.setTestModuleQueue(this.moduleQueue);\n\n      assert.strictEqual(\n        this.stateManager.getNextModuleTestModuleQueue(),\n        'foo',\n        'correctly returns the next module',\n      );\n      assert.deepEqual(\n        this.stateManager.getTestModuleQueue(),\n        ['bar', 'baz', 'boo', 'far', 'faz'],\n        'the moduleQueue state was updated',\n      );\n    });\n\n    it('get next module returns null if shared moduleQueue is not set', function () {\n      assert.strictEqual(\n        this.stateManager.getNextModuleTestModuleQueue(),\n        null,\n        'returns null when moduleQueue has not been set',\n      );\n    });\n\n    it('had different queue set when browserId is specified', function () {\n      const anotherQueue = ['1', '2', '3', '4'];\n      
this.stateManager.setReplayExecutionModuleQueue(this.moduleQueue, 1);\n      this.stateManager.setReplayExecutionModuleQueue(anotherQueue, 2);\n\n      assert.deepEqual(\n        this.stateManager.getReplayExecutionModuleQueue(1),\n        this.moduleQueue,\n      );\n      assert.deepEqual(\n        this.stateManager.getReplayExecutionModuleQueue(2),\n        anotherQueue,\n      );\n    });\n\n    it('returns the next module from the browser specific moduleQueue and state is preserved', function () {\n      this.stateManager.setReplayExecutionModuleQueue(this.moduleQueue, 1);\n\n      assert.strictEqual(\n        this.stateManager.getNextModuleReplayExecutionModuleQueue(1),\n        'foo',\n        'correctly returns the next module',\n      );\n      assert.deepEqual(\n        this.stateManager.getReplayExecutionModuleQueue(1),\n        ['bar', 'baz', 'boo', 'far', 'faz'],\n        'the moduleQueue state was updated',\n      );\n    });\n\n    it('get next module returns null if browser moduleQueue is not set', function () {\n      assert.strictEqual(\n        this.stateManager.getNextModuleReplayExecutionModuleQueue(1),\n        null,\n        'returns null when moduleQueue has not been set',\n      );\n    });\n  });\n\n  describe('completedBrowsers', function () {\n    it('incrementCompletedBrowsers called for the same browserId will only be accounted once', function () {\n      this.stateManager.incrementCompletedBrowsers(1);\n      this.stateManager.incrementCompletedBrowsers(1);\n\n      assert.deepEqual(this.stateManager.getCompletedBrowser(), 1);\n    });\n  });\n\n  describe('moduleRunDetails', function () {\n    it('returns a map size of 0', function () {\n      assert.strictEqual(this.stateManager.getModuleMetadata().size, 0);\n    });\n\n    it('adds a single testDone module metadata to moduleMetadata.', function () {\n      const testModuleName = 'foo';\n      const moduleMetadata = {\n        moduleName: testModuleName,\n        testName: 'testing 
foo',\n        passed: 1,\n        failed: 0,\n        skipped: false,\n        total: 1,\n        duration: 1,\n      };\n\n      this.stateManager.addToModuleMetadata(moduleMetadata);\n\n      const fooModuleMetadata = this.stateManager\n        .getModuleMetadata()\n        .get(testModuleName);\n\n      assert.strictEqual(fooModuleMetadata.passed, 1);\n      assert.strictEqual(fooModuleMetadata.failed, 0);\n      assert.strictEqual(fooModuleMetadata.skipped, 0);\n      assert.strictEqual(fooModuleMetadata.duration, 1);\n      assert.strictEqual(fooModuleMetadata.failedTests.length, 0);\n    });\n\n    it('adds two test metadata and returns cumulative module data', function () {\n      const fooTestModule = 'foo';\n      const fooTestMetadata = {\n        moduleName: fooTestModule,\n        testName: 'testing foo',\n        passed: 1,\n        failed: 0,\n        skipped: false,\n        total: 1,\n        duration: 1,\n      };\n\n      const barTestMetadata = {\n        moduleName: fooTestModule,\n        testName: 'testing bar',\n        passed: 0,\n        failed: 1,\n        skipped: false,\n        total: 1,\n        duration: 1.8,\n      };\n\n      this.stateManager.addToModuleMetadata(fooTestMetadata);\n      this.stateManager.addToModuleMetadata(barTestMetadata);\n\n      const fooModuleMetadata = this.stateManager\n        .getModuleMetadata()\n        .get(fooTestModule);\n\n      assert.strictEqual(fooModuleMetadata.total, 2);\n      assert.strictEqual(fooModuleMetadata.passed, 1);\n      assert.strictEqual(fooModuleMetadata.failed, 1);\n      assert.strictEqual(fooModuleMetadata.skipped, 0);\n      assert.strictEqual(fooModuleMetadata.duration, 2.8);\n      assert.strictEqual(fooModuleMetadata.failedTests.length, 1);\n    });\n  });\n});\n"
  },
  {
    "path": "node-tests/unit/utils/query-helper-test.js",
    "content": "'use strict';\n\nconst assert = require('assert');\nconst { addToQuery, addToUrl } = require('../../../lib/utils/query-helper');\n\ndescribe('QueryHelper', function () {\n  describe('addToQuery', function () {\n    it('should add param when no query and value is true', function () {\n      const validQuery = addToQuery(null, 'foo', true);\n\n      assert.deepEqual(validQuery, 'foo');\n    });\n\n    it('should add param and value when no query and value is string', function () {\n      const validQuery = addToQuery(null, 'foo', 'bar');\n\n      assert.deepEqual(validQuery, 'foo=bar');\n    });\n\n    it('should add param to query when value is boolean', function () {\n      const validQuery = addToQuery('foo', 'bar', true);\n\n      assert.deepEqual(validQuery, 'foo&bar');\n    });\n\n    it('should add param and value to query when value is string', function () {\n      const validQuery = addToQuery('foo', 'bar', 'baz');\n\n      assert.deepEqual(validQuery, 'foo&bar=baz');\n    });\n\n    it('should not add param when value is false', function () {\n      const validQuery = addToQuery('foo', 'bar', false);\n\n      assert.deepEqual(validQuery, 'foo');\n    });\n  });\n\n  describe('addToUrl', function () {\n    it('should add param to url when value is true', function () {\n      const url = addToUrl('tests/index.html?hidepassed', 'foo', true);\n\n      assert.deepEqual(url, 'tests/index.html?hidepassed&foo');\n    });\n\n    it('should not add param to url when value is false', function () {\n      const url = addToUrl('tests/index.html?hidepassed', 'foo', false);\n\n      assert.deepEqual(url, 'tests/index.html?hidepassed');\n    });\n\n    it('should add param and value to url when value is string', function () {\n      const url = addToUrl('tests/index.html?hidepassed', 'foo', 'bar');\n\n      assert.deepEqual(url, 'tests/index.html?hidepassed&foo=bar');\n    });\n  });\n});\n"
  },
  {
    "path": "node-tests/unit/utils/test-page-helper-test.js",
    "content": "'use strict';\n\nconst assert = require('assert');\nconst sinon = require('sinon');\nconst {\n  combineOptionValueIntoArray,\n  getBrowserId,\n  getCustomBaseUrl,\n  getMultipleTestPages,\n  getTestUrlFromTestemConfig,\n} = require('../../../lib/utils/test-page-helper');\n\ndescribe('TestPageHelper', function () {\n  describe('combineOptionValueIntoArray', function () {\n    it('should return empty array when no optionValue specified', function () {\n      assert.deepEqual(combineOptionValueIntoArray(), []);\n    });\n\n    it('should have a specified option number when the option is number', function () {\n      assert.deepEqual(combineOptionValueIntoArray(3), [3]);\n    });\n\n    it('should have a number of array when a specified option is string', function () {\n      assert.deepEqual(combineOptionValueIntoArray('2,3'), [2, 3]);\n    });\n\n    it('should have a number of array when a specified option is a combination of number and string', function () {\n      assert.deepEqual(combineOptionValueIntoArray([1, '2,3']), [1, 2, 3]);\n    });\n\n    it('should have a sequence number of array when a specified option is in range', function () {\n      assert.deepEqual(combineOptionValueIntoArray('1..5'), [1, 2, 3, 4, 5]);\n    });\n\n    it('should have a number of array when a specified option is a combination of number and string in range', function () {\n      assert.deepEqual(\n        combineOptionValueIntoArray([1, '3..6']),\n        [1, 3, 4, 5, 6],\n      );\n    });\n  });\n\n  describe('getBrowserId', function () {\n    it('should return the correct browserId', function () {\n      const launcher = {\n        settings: {\n          test_page: 'loadBalance&browser=1',\n        },\n      };\n      assert.strictEqual(getBrowserId(launcher), '1');\n    });\n\n    it('should throw an error if the launcher does not have test page set', function () {\n      const warnStub = sinon.stub(console, 'warn');\n      const launcher = {\n        foo: 
'bar',\n      };\n      assert.strictEqual(getBrowserId(launcher), 0);\n      sinon.assert.calledOnce(warnStub);\n      sinon.assert.calledWithMatch(warnStub, /Launcher Settings:/);\n      warnStub.restore();\n    });\n  });\n\n  describe('getTestUrlFromTestemConfig', function () {\n    it('should have a default test page with no config file', function () {\n      const testPage = getTestUrlFromTestemConfig('');\n\n      assert.deepEqual(testPage, 'tests/index.html?hidepassed');\n    });\n\n    it('should have a default test page with no test-page specified in a testem config file', function () {\n      const warnStub = sinon.stub(console, 'warn');\n      const testPage = getTestUrlFromTestemConfig('testem.no-test-page.js');\n\n      assert.deepEqual(testPage, 'tests/index.html?hidepassed');\n\n      sinon.assert.calledOnce(warnStub);\n      sinon.assert.calledWithExactly(\n        warnStub,\n        'No test_page value found in the config. Defaulting to \"tests/index.html?hidepassed\"',\n      );\n\n      warnStub.restore();\n    });\n\n    it('should have multiple test pages specified in testem config file with test-page specified in the file', function () {\n      const testPages = getTestUrlFromTestemConfig(\n        'testem.multiple-test-page.js',\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&derp=herp',\n        'tests/index.html?hidepassed&foo=bar',\n      ]);\n    });\n  });\n\n  describe('getCustomBaseUrl', function () {\n    it('should add `split` when `split` option is used', function () {\n      const appendedUrl = getCustomBaseUrl(\n        { split: 3 },\n        'tests/index.html?hidepassed',\n      );\n\n      assert.deepEqual(appendedUrl, 'tests/index.html?hidepassed&split=3');\n    });\n\n    it('should add `split` when `split` and `parallel` option are used', function () {\n      const appendedUrl = getCustomBaseUrl(\n        { split: 5, parallel: true },\n        'tests/index.html?hidepassed',\n      
);\n\n      assert.deepEqual(appendedUrl, 'tests/index.html?hidepassed&split=5');\n    });\n\n    it('should add `loadBalance` when `load-balance` option is used', function () {\n      const appendedUrl = getCustomBaseUrl(\n        { loadBalance: 2 },\n        'tests/index.html?hidepassed',\n      );\n\n      assert.deepEqual(appendedUrl, 'tests/index.html?hidepassed&loadBalance');\n    });\n\n    it('should add `split`, `loadBalance`, and `partition` when `split`, `loadBalance`, and `partition` are used.', function () {\n      const appendedUrl = getCustomBaseUrl(\n        { split: 5, partition: [1, 2, 3], loadBalance: 2 },\n        'tests/index.html?hidepassed',\n      );\n\n      assert.deepEqual(\n        appendedUrl,\n        'tests/index.html?hidepassed&split=5&loadBalance&partition=1&partition=2&partition=3',\n      );\n    });\n\n    it('should add `loadBalance` when `replay-execution` and `replay-browser` are used', function () {\n      const appendedUrl = getCustomBaseUrl(\n        {\n          replayExecution: 'test-execution-0000000.json',\n          replayBrowser: [1, 2],\n        },\n        'tests/index.html?hidepassed',\n      );\n\n      assert.deepEqual(appendedUrl, 'tests/index.html?hidepassed&loadBalance');\n    });\n\n    it('should add `split` to multiple test pages when `split` option is used', function () {\n      const appendedUrl = getCustomBaseUrl({ split: 3 }, [\n        'tests/index.html?hidepassed&derp=herp',\n        'tests/index.html?hidepassed&foo=bar',\n      ]);\n\n      assert.deepEqual(appendedUrl, [\n        'tests/index.html?hidepassed&derp=herp&split=3',\n        'tests/index.html?hidepassed&foo=bar&split=3',\n      ]);\n    });\n\n    it('should add `split` when `split` to multiple test pages and `parallel` option are used', function () {\n      const appendedUrl = getCustomBaseUrl({ split: 5, parallel: true }, [\n        'tests/index.html?hidepassed&derp=herp',\n        'tests/index.html?hidepassed&foo=bar',\n      ]);\n\n  
    assert.deepEqual(appendedUrl, [\n        'tests/index.html?hidepassed&derp=herp&split=5',\n        'tests/index.html?hidepassed&foo=bar&split=5',\n      ]);\n    });\n\n    it('should add `loadBalance` to multiple test pages when `load-balance` option is used', function () {\n      const appendedUrl = getCustomBaseUrl({ loadBalance: 2 }, [\n        'tests/index.html?hidepassed&derp=herp',\n        'tests/index.html?hidepassed&foo=bar',\n      ]);\n\n      assert.deepEqual(appendedUrl, [\n        'tests/index.html?hidepassed&derp=herp&loadBalance',\n        'tests/index.html?hidepassed&foo=bar&loadBalance',\n      ]);\n    });\n\n    it('should add `split`, `loadBalance`, and `partition` to multiple test pages when `split`, `loadBalance`, and `partition` are used.', function () {\n      const appendedUrl = getCustomBaseUrl(\n        { split: 5, partition: [1, 2, 3], loadBalance: 2 },\n        [\n          'tests/index.html?hidepassed&derp=herp',\n          'tests/index.html?hidepassed&foo=bar',\n        ],\n      );\n\n      assert.deepEqual(appendedUrl, [\n        'tests/index.html?hidepassed&derp=herp&split=5&loadBalance&partition=1&partition=2&partition=3',\n        'tests/index.html?hidepassed&foo=bar&split=5&loadBalance&partition=1&partition=2&partition=3',\n      ]);\n    });\n\n    it('should add `loadBalance` to multiple test pages when `replay-execution` and `replay-browser` are used', function () {\n      const appendedUrl = getCustomBaseUrl(\n        {\n          replayExecution: 'test-execution-0000000.json',\n          replayBrowser: [1, 2],\n        },\n        [\n          'tests/index.html?hidepassed&derp=herp',\n          'tests/index.html?hidepassed&foo=bar',\n        ],\n      );\n\n      assert.deepEqual(appendedUrl, [\n        'tests/index.html?hidepassed&derp=herp&loadBalance',\n        'tests/index.html?hidepassed&foo=bar&loadBalance',\n      ]);\n    });\n  });\n\n  describe('getMultipleTestPages', function () {\n    it('should have 
multiple test pages with no partitions specified', function () {\n      const testPages = getMultipleTestPages(\n        { testPage: 'tests/index.html?hidepassed' },\n        { parallel: 1, split: 2 },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&split=2&partition=1',\n        'tests/index.html?hidepassed&split=2&partition=2',\n      ]);\n    });\n\n    it('should have multiple test pages with specified partitions', function () {\n      const testPages = getMultipleTestPages(\n        { testPage: 'tests/index.html?hidepassed' },\n        { parallel: 1, split: 4, partition: [3, 4] },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&split=4&partition=3',\n        'tests/index.html?hidepassed&split=4&partition=4',\n      ]);\n    });\n\n    it('should have multiple test pages for each test_page in the config file with no partitions specified', function () {\n      const testPages = getMultipleTestPages(\n        { configFile: 'testem.multiple-test-page.js' },\n        { parallel: 1, split: 2 },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&derp=herp&split=2&partition=1',\n        'tests/index.html?hidepassed&derp=herp&split=2&partition=2',\n        'tests/index.html?hidepassed&foo=bar&split=2&partition=1',\n        'tests/index.html?hidepassed&foo=bar&split=2&partition=2',\n      ]);\n    });\n\n    it('should have multiple test pages for each test_page in the config file with partitions specified', function () {\n      const testPages = getMultipleTestPages(\n        { configFile: 'testem.multiple-test-page.js' },\n        { parallel: 1, split: 4, partition: [3, 4] },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&derp=herp&split=4&partition=3',\n        'tests/index.html?hidepassed&derp=herp&split=4&partition=4',\n        'tests/index.html?hidepassed&foo=bar&split=4&partition=3',\n        
'tests/index.html?hidepassed&foo=bar&split=4&partition=4',\n      ]);\n    });\n\n    it('should have a test page with `loadBalance` when no specified number of browser', function () {\n      const testPages = getMultipleTestPages(\n        { testPage: 'tests/index.html?hidepassed' },\n        { loadBalance: true, parallel: 1 },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&loadBalance&browser=1',\n      ]);\n    });\n\n    it('should have multiple test page with `loadBalance` with splitting when no specified number of browser', function () {\n      const testPages = getMultipleTestPages(\n        { testPage: 'tests/index.html?hidepassed' },\n        { loadBalance: true, parallel: 1, split: 2 },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&split=2&loadBalance&browser=1',\n      ]);\n    });\n\n    it('should have multiple test pages with test loading balanced, no specified partitions and no splitting', function () {\n      const testPages = getMultipleTestPages(\n        { testPage: 'tests/index.html?hidepassed' },\n        { loadBalance: true, parallel: 2 },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&loadBalance&browser=1',\n        'tests/index.html?hidepassed&loadBalance&browser=2',\n      ]);\n    });\n\n    it('should have multiple test pages with test loading balanced, no specified partitions and no splitting', function () {\n      const testPages = getMultipleTestPages(\n        { testPage: 'tests/index.html?hidepassed' },\n        { loadBalance: true, parallel: 2, split: 3, partition: [2, 3] },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&split=3&loadBalance&partition=2&partition=3&browser=1',\n        'tests/index.html?hidepassed&split=3&loadBalance&partition=2&partition=3&browser=2',\n      ]);\n    });\n\n    it('should have multiple test pages for each test_page in the config file with 
partitions specified and test loading balanced', function () {\n      const testPages = getMultipleTestPages(\n        { configFile: 'testem.multiple-test-page.js' },\n        { loadBalance: true, parallel: 1, split: 4, partition: [3, 4] },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&derp=herp&split=4&loadBalance&partition=3&partition=4&browser=1',\n        'tests/index.html?hidepassed&foo=bar&split=4&loadBalance&partition=3&partition=4&browser=1',\n      ]);\n    });\n\n    it('should have multiple test pages with test replay execution', function () {\n      const testPages = getMultipleTestPages(\n        { testPage: 'tests/index.html?hidepassed' },\n        { replayExecution: 'abc.json', replayBrowser: [2] },\n      );\n\n      assert.deepEqual(testPages, [\n        'tests/index.html?hidepassed&loadBalance&browser=2',\n      ]);\n    });\n  });\n});\n"
  },
  {
    "path": "node-tests/unit/utils/testem-events-test.js",
    "content": "'use strict';\n\nconst assert = require('assert');\nconst fixturify = require('fixturify');\nconst fs = require('fs-extra');\nconst path = require('path');\nconst TestemEvents = require('../../../lib/utils/testem-events');\n\nconst fixtureDir = 'tmp/fixture';\nconst testExecutionJsonPath = path.join(fixtureDir, 'test-execution-123.json');\nconst testExecutionJson = {\n  numberOfBrowsers: 1,\n  failedBrowsers: [],\n  executionMapping: {\n    1: ['path/to/testmodule', 'path/to/another/testmodule'],\n  },\n};\n\ndescribe('TestemEvents', function () {\n  beforeEach(function () {\n    fs.mkdirpSync(fixtureDir);\n    this.testemEvents = new TestemEvents(fixtureDir);\n    this.moduleQueue = ['foo', 'bar', 'baz', 'boo', 'far', 'faz'];\n  });\n\n  afterEach(function () {\n    fs.removeSync(fixtureDir);\n  });\n\n  describe('setModuleQueue', function () {\n    beforeEach(function () {\n      fixturify.writeSync(fixtureDir, {\n        'test-execution-123.json': JSON.stringify(testExecutionJson),\n      });\n    });\n\n    it('set TestModuleQueue for load-balance mode', function () {\n      this.testemEvents.setModuleQueue(1, this.moduleQueue, true, false);\n\n      assert.deepEqual(\n        this.testemEvents.stateManager.getTestModuleQueue(),\n        this.moduleQueue,\n      );\n    });\n\n    it('ignore subsequent setModuleQueue if moduleQueue is already set for load-balance mode', function () {\n      const anotherModuleQueue = ['a', 'b', 'c'];\n      this.testemEvents.setModuleQueue('1', this.moduleQueue, true, false);\n      this.testemEvents.setModuleQueue('2', anotherModuleQueue, true, false);\n\n      assert.deepEqual(\n        this.testemEvents.stateManager.getTestModuleQueue(),\n        this.moduleQueue,\n      );\n    });\n\n    it('set replayExecutionModuleQueue for replay-execution mode', function () {\n      this.testemEvents.setReplayExecutionMap(testExecutionJsonPath, ['1']);\n      this.testemEvents.setModuleQueue('1', this.moduleQueue, 
false, true);\n\n      assert.deepEqual(\n        this.testemEvents.stateManager.getReplayExecutionModuleQueue('1'),\n        testExecutionJson.executionMapping['1'],\n      );\n    });\n\n    it('set replayExecutionModuleQueue for replay-execution mode when replay-browser is undefined', function () {\n      this.testemEvents.setReplayExecutionMap(testExecutionJsonPath);\n      this.testemEvents.setModuleQueue('1', this.moduleQueue, false, true);\n\n      assert.deepEqual(\n        this.testemEvents.stateManager.getReplayExecutionModuleQueue('1'),\n        testExecutionJson.executionMapping['1'],\n      );\n    });\n\n    it('throws error if ReplayExecutionMap is not set when setting replayExecutionModuleQueue for replay-execution mode', function () {\n      assert.throws(\n        () =>\n          this.testemEvents.setModuleQueue(1, this.moduleQueue, false, true),\n        /No replay execution map was set on the stateManager/,\n        'Error is thrown',\n      );\n    });\n  });\n\n  describe('nextModuleResponse', function () {\n    const socket = {\n      events: [],\n      emit: function (event, payload) {\n        this.events.push(event);\n\n        if (payload) {\n          this.events.push(payload);\n        }\n      },\n      reset: function () {\n        this.events.length = 0;\n      },\n    };\n\n    const fooResponse = {\n      done: false,\n      value: 'foo',\n    };\n\n    const emptyMap = new Map();\n\n    afterEach(function () {\n      socket.reset();\n    });\n\n    it('should fire next-module-response event and save the moduleName to stateManager.moduleMap when write-execution-file is true', function () {\n      this.testemEvents.stateManager.setTestModuleQueue(this.moduleQueue);\n      this.testemEvents.nextModuleResponse(1, socket, true);\n\n      assert.deepEqual(\n        socket.events,\n        ['testem:next-module-response', fooResponse],\n        'testem:next-module-response event was emitted with payload foo',\n      );\n      
assert.deepEqual(\n        this.testemEvents.stateManager.getModuleMap().values().next().value,\n        ['foo'],\n        'module was correctly saved to the moduleMap',\n      );\n    });\n\n    it('should not save the moduleName to stateManager.moduleMap when write-execution-file is false', function () {\n      this.testemEvents.stateManager.setReplayExecutionModuleQueue([], 1);\n      this.testemEvents.nextModuleResponse(1, socket, false);\n\n      assert.deepEqual(\n        this.testemEvents.stateManager.getModuleMap(),\n        emptyMap,\n        'moduleMap should be in its initial state',\n      );\n    });\n\n    it('should throw error if no moduleQueues were set', function () {\n      assert.throws(\n        () => this.testemEvents.nextModuleResponse(1, socket, false, 'dev'),\n        /No moduleQueue was set/,\n        'No moduleQueue error was thrown',\n      );\n    });\n  });\n\n  describe('recordFailedBrowserId', function () {\n    const launcher = {\n      settings: {\n        test_page: 'browser=1',\n      },\n    };\n\n    it('record new browserId if test failed', function () {\n      this.testemEvents.recordFailedBrowserId(launcher, {});\n\n      assert.deepEqual(\n        this.testemEvents.stateManager.getFailedBrowsers(),\n        [1],\n        'failed browserId 1 is correctly recorded',\n      );\n    });\n\n    it('does not record browserId that has already been recorded', function () {\n      this.testemEvents.recordFailedBrowserId(launcher, {});\n      this.testemEvents.recordFailedBrowserId(launcher, {});\n\n      assert.deepEqual(\n        this.testemEvents.stateManager.getFailedBrowsers(),\n        [1],\n        'failed browserId 1 is correctly recorded only once',\n      );\n    });\n  });\n\n  describe('completedBrowsersHandler', function () {\n    const mockUi = {\n      writeLine: () => {},\n    };\n\n    it('should increment completedBrowsers only when completedBrowsers is less than browserCount', function () {\n      
this.testemEvents.completedBrowsersHandler(\n        2,\n        1,\n        mockUi,\n        new Map([\n          ['loadBalance', true],\n          ['writeExecutionFile', false],\n        ]),\n        '0000',\n      );\n\n      assert.strictEqual(\n        this.testemEvents.stateManager.getCompletedBrowser(),\n        1,\n        'completedBrowsers was incremented',\n      );\n    });\n\n    it('should write test-execution file and cleanup state when completedBrowsers equals browserCount and load-balance is true', function () {\n      this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('a', 1);\n      this.testemEvents.stateManager.addToStartedLaunchers(1010);\n\n      this.testemEvents.completedBrowsersHandler(\n        1,\n        1,\n        mockUi,\n        new Map([\n          ['loadBalance', true],\n          ['writeExecutionFile', true],\n        ]),\n        '0000',\n      );\n\n      const actual = fs.readFileSync(\n        path.join(fixtureDir, 'test-execution-0000.json'),\n      );\n\n      assert.deepEqual(JSON.parse(actual), {\n        numberOfBrowsers: 1,\n        failedBrowsers: [],\n        executionMapping: {\n          1: ['a'],\n        },\n      });\n    });\n\n    it('should write module-run-details file and cleanup state when completedBrowsers equals browserCount, load-balance is true, and write-execution-file is false', function () {\n      this.testemEvents.stateManager.addToModuleMetadata({\n        moduleName: 'a',\n        testName: 'test',\n        passed: true,\n        failed: false,\n        skipped: false,\n        duration: 1,\n      });\n      this.testemEvents.stateManager.addToStartedLaunchers(1010);\n\n      this.testemEvents.completedBrowsersHandler(\n        1,\n        1,\n        mockUi,\n        new Map([\n          ['loadBalance', true],\n          ['writeModuleMetadataFile', true],\n        ]),\n        '0000',\n      );\n\n      const actual = fs.readFileSync(\n        path.join(fixtureDir, 
'module-metadata-0000.json'),\n      );\n\n      assert.deepEqual(JSON.parse(actual).modules, [\n        {\n          moduleName: 'a',\n          total: 1,\n          passed: 1,\n          failed: 0,\n          skipped: 0,\n          duration: 1,\n          failedTests: [],\n        },\n      ]);\n    });\n\n    it('should write module-run-details file with sorted by duration', function () {\n      this.testemEvents.stateManager.addToModuleMetadata({\n        moduleName: 'a',\n        testName: 'test 1',\n        passed: true,\n        failed: false,\n        skipped: false,\n        duration: 1,\n      });\n      this.testemEvents.stateManager.addToModuleMetadata({\n        moduleName: 'a',\n        testName: 'test 2',\n        passed: false,\n        failed: true,\n        skipped: false,\n        duration: 8,\n      });\n      this.testemEvents.stateManager.addToModuleMetadata({\n        moduleName: 'b',\n        testName: 'test 1',\n        passed: true,\n        failed: false,\n        skipped: false,\n        duration: 1,\n      });\n\n      this.testemEvents.stateManager.addToStartedLaunchers(1010);\n\n      this.testemEvents.completedBrowsersHandler(\n        1,\n        1,\n        mockUi,\n        new Map([\n          ['loadBalance', true],\n          ['writeModuleMetadataFile', true],\n        ]),\n        '0000',\n      );\n\n      const actual = fs.readFileSync(\n        path.join(fixtureDir, 'module-metadata-0000.json'),\n      );\n\n      assert.deepEqual(JSON.parse(actual).modules, [\n        {\n          moduleName: 'a',\n          total: 2,\n          passed: 1,\n          failed: 1,\n          skipped: 0,\n          duration: 9,\n          failedTests: ['test 2'],\n        },\n        {\n          moduleName: 'b',\n          total: 1,\n          passed: 1,\n          failed: 0,\n          skipped: 0,\n          duration: 1,\n          failedTests: [],\n        },\n      ]);\n    });\n\n    it('should add skipped test number to write 
module-metadata-file with sorted by duration', function () {\n      this.testemEvents.stateManager.addToModuleMetadata({\n        moduleName: 'a',\n        testName: 'test 1',\n        passed: true,\n        failed: false,\n        skipped: true,\n        duration: 0,\n      });\n      this.testemEvents.stateManager.addToModuleMetadata({\n        moduleName: 'a',\n        testName: 'test 2',\n        passed: false,\n        failed: true,\n        skipped: false,\n        duration: 8,\n      });\n      this.testemEvents.stateManager.addToModuleMetadata({\n        moduleName: 'b',\n        testName: 'test 1',\n        passed: true,\n        failed: false,\n        skipped: false,\n        duration: 1,\n      });\n      this.testemEvents.stateManager.addToModuleMetadata({\n        moduleName: 'b',\n        testName: 'test 1',\n        passed: true,\n        failed: false,\n        skipped: false,\n        duration: 0,\n      });\n      this.testemEvents.stateManager.addToModuleMetadata({\n        moduleName: 'b',\n        testName: 'test 1',\n        passed: true,\n        failed: false,\n        skipped: true,\n        duration: 1,\n      });\n\n      this.testemEvents.stateManager.addToStartedLaunchers(1010);\n\n      this.testemEvents.completedBrowsersHandler(\n        1,\n        1,\n        mockUi,\n        new Map([\n          ['loadBalance', true],\n          ['writeModuleMetadataFile', true],\n        ]),\n        '0000',\n      );\n\n      const actual = fs.readFileSync(\n        path.join(fixtureDir, 'module-metadata-0000.json'),\n      );\n\n      assert.deepEqual(JSON.parse(actual).modules, [\n        {\n          moduleName: 'a',\n          total: 2,\n          passed: 0,\n          failed: 1,\n          skipped: 1,\n          duration: 8,\n          failedTests: ['test 2'],\n        },\n        {\n          moduleName: 'b',\n          total: 3,\n          passed: 2,\n          failed: 0,\n          skipped: 1,\n          duration: 2,\n          
failedTests: [],\n        },\n      ]);\n    });\n\n    it('should increment completedBrowsers when load-balance is false', function () {\n      this.testemEvents.completedBrowsersHandler(\n        2,\n        1,\n        mockUi,\n        new Map([\n          ['loadBalance', false],\n          ['writeExecutionFile', false],\n        ]),\n        '0000',\n      );\n\n      assert.strictEqual(\n        this.testemEvents.stateManager.getCompletedBrowser(),\n        1,\n        'completedBrowsers was incremented',\n      );\n    });\n\n    it('should not clean up states from stateManager when test execution is not completed', function () {\n      this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('a', 1);\n      this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('b', 2);\n\n      this.testemEvents.completedBrowsersHandler(\n        2,\n        1011,\n        mockUi,\n        new Map([['loadBalance', true]]),\n        '0000',\n      );\n\n      assert.deepEqual(this.testemEvents.stateManager.getModuleMap().size, 2);\n    });\n\n    it('should clean up states from stateManager when test execution is completed', function () {\n      const mockReplayExecutionMap = { 1: ['a'] };\n      this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('a', 1);\n      this.testemEvents.stateManager.setReplayExecutionMap(\n        mockReplayExecutionMap,\n      );\n      this.testemEvents.stateManager.addToStartedLaunchers(1010);\n\n      this.testemEvents.completedBrowsersHandler(\n        1,\n        1010,\n        mockUi,\n        new Map([['loadBalance', true]]),\n        '0000',\n      );\n\n      assert.deepEqual(this.testemEvents.stateManager.getModuleMap().size, 0);\n      assert.deepEqual(\n        this.testemEvents.stateManager.getTestModuleQueue(),\n        null,\n      );\n      assert.deepEqual(\n        this.testemEvents.stateManager.getReplayExecutionModuleQueue(),\n        null,\n      );\n      assert.deepEqual(\n        
this.testemEvents.stateManager.getReplayExecutionMap(),\n        mockReplayExecutionMap,\n      );\n    });\n\n    it('should clean up states from stateManager when all launched browsers complete tests', function () {\n      const mockReplayExecutionMap = { 1: ['a'] };\n      this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('a', 1);\n      this.testemEvents.stateManager.setReplayExecutionMap(\n        mockReplayExecutionMap,\n      );\n      this.testemEvents.stateManager.addToStartedLaunchers(1010);\n\n      this.testemEvents.completedBrowsersHandler(\n        1,\n        1010,\n        mockUi,\n        new Map([['loadBalance', true]]),\n        '0000',\n      );\n\n      assert.deepEqual(this.testemEvents.stateManager.getModuleMap().size, 0);\n      assert.deepEqual(\n        this.testemEvents.stateManager.getTestModuleQueue(),\n        null,\n      );\n      assert.deepEqual(\n        this.testemEvents.stateManager.getReplayExecutionModuleQueue(),\n        null,\n      );\n      assert.deepEqual(\n        this.testemEvents.stateManager.getReplayExecutionMap(),\n        mockReplayExecutionMap,\n      );\n    });\n\n    it('should clean up states from stateManager when all launched browsers exited', function () {\n      this.testemEvents.stateManager.setTestModuleQueue([]);\n      this.testemEvents.stateManager.addToStartedLaunchers(1010);\n      this.testemEvents.stateManager.addToStartedLaunchers(1011);\n\n      this.testemEvents.completedBrowsersHandler(\n        2,\n        1010,\n        mockUi,\n        new Map([['loadBalance', true]]),\n        '0000',\n      );\n\n      assert.deepEqual(this.testemEvents.stateManager.getModuleMap().size, 0);\n      assert.deepEqual(this.testemEvents.stateManager.getTestModuleQueue(), []);\n\n      this.testemEvents.completedBrowsersHandler(\n        2,\n        1011,\n        mockUi,\n        new Map([['loadBalance', true]]),\n        '0000',\n      );\n\n      assert.deepEqual(\n        
this.testemEvents.stateManager.getTestModuleQueue(),\n        null,\n      );\n    });\n  });\n});\n"
  },
  {
    "path": "node-tests/unit/utils/tests-options-validator-test.js",
    "content": "'use strict';\n\nconst assert = require('assert');\nconst fixturify = require('fixturify');\nconst fs = require('fs-extra');\nconst TestOptionsValidator = require('../../../lib/utils/tests-options-validator');\nconst TestExecutionJson = {\n  numberOfBrowsers: 2,\n  browserToModuleMap: {\n    1: ['/tests/integration/components/my-component-test'],\n    2: ['/tests/integration/components/navigating-component-test'],\n  },\n};\n\ndescribe('TestOptionsValidator', function () {\n  function validateCommand(validator, cmd) {\n    switch (cmd) {\n      case 'Split':\n        return validator.validateSplit();\n      case 'Random':\n        return validator.validateRandom();\n      case 'Parallel':\n        return validator.validateParallel();\n      case 'writeExecutionFile':\n        return validator.validateWriteExecutionFile();\n      case 'LoadBalance':\n        return validator.validateLoadBalance();\n      case 'ReplayExecution':\n        return validator.validateReplayExecution();\n      default:\n        throw new Error('invalid command passed');\n    }\n  }\n\n  function shouldThrow(cmd, options, message, emberCliVer = '3.7.0') {\n    const validator = new TestOptionsValidator(options, emberCliVer);\n    assert.throws(() => validateCommand(validator, cmd), message);\n  }\n\n  function shouldEqual(cmd, options, value, emberCliVer = '3.7.0') {\n    const validator = new TestOptionsValidator(options, emberCliVer);\n    assert.strictEqual(validateCommand(validator, cmd), value);\n  }\n\n  function shouldWarn(cmd, options, value, emberCliVer = '3.7.0') {\n    let originalWarn = console.warn;\n    let warnCalled = 0;\n    let warnMessage = '';\n    console.warn = function (message) {\n      warnCalled++;\n      warnMessage = message;\n    };\n\n    const validator = new TestOptionsValidator(options, emberCliVer);\n    assert.notEqual(validateCommand(validator, cmd), undefined);\n    assert.strictEqual(warnCalled, 1);\n    assert.strictEqual(warnMessage, 
value);\n\n    console.warn = originalWarn;\n  }\n\n  describe('shouldSplit', function () {\n    function shouldSplitThrows(options, message) {\n      shouldThrow('Split', options, message);\n    }\n\n    function shouldSplitEqual(options, message) {\n      shouldEqual('Split', options, message);\n    }\n\n    it('should log a warning if `split` is less than 2', function () {\n      shouldWarn(\n        'Split',\n        { split: 1 },\n        'You should specify a number of files greater than 1 to split your tests across. Defaulting to 1 split which is the same as not using `split`.',\n      );\n    });\n\n    it('should throw an error if `partition` is used without `split`', function () {\n      shouldSplitThrows(\n        { partition: [1] },\n        /You must specify a `split` value in order to use `partition`/,\n      );\n    });\n\n    it('should throw an error if `partition` contains a value less than 1', function () {\n      shouldSplitThrows(\n        { split: 2, partition: [1, 0] },\n        /Split tests are one-indexed, so you must specify partition values greater than or equal to 1./,\n      );\n    });\n\n    it('should throw an error if `partition` contains a value greater than `split`', function () {\n      shouldSplitThrows(\n        { split: 2, partition: [1, 3] },\n        /You must specify `partition` values that are less than or equal to your `split` value./,\n      );\n    });\n\n    it('should throw an error if `partition` contains duplicate values', function () {\n      shouldSplitThrows(\n        { split: 2, partition: [1, 2, 1] },\n        /You cannot specify the same partition value twice. 
1 is repeated./,\n      );\n    });\n\n    it('should return true if using `split`', function () {\n      shouldSplitEqual({ split: 2 }, true);\n    });\n\n    it('should return true if using `split` and `partition`', function () {\n      shouldSplitEqual({ split: 2, partition: [1] }, true);\n    });\n\n    it('should return false if not using `split`', function () {\n      shouldSplitEqual({}, false);\n    });\n  });\n\n  describe('shouldRandomize', function () {\n    function shouldRandomizeEqual(options, message) {\n      shouldEqual('Random', options, message);\n    }\n\n    it('should return true if `random` is an empty string', function () {\n      shouldRandomizeEqual({ random: '' }, true);\n    });\n\n    it('should return true if `random` is set to a string', function () {\n      shouldRandomizeEqual({ random: '1337' }, true);\n    });\n\n    it('should return false if `random` is a non-string', function () {\n      shouldRandomizeEqual({ random: true }, false);\n    });\n\n    it('should return false if `random` is not used', function () {\n      shouldRandomizeEqual({}, false);\n    });\n  });\n\n  describe('shouldParallelize', function () {\n    it('should throw an error if `parallel` is not a numeric value', function () {\n      shouldThrow(\n        'Parallel',\n        { parallel: '--reporter' },\n        /EmberExam: You must specify a Numeric value to 'parallel'. 
Value passed: --reporter/,\n      );\n    });\n\n    it('should throw an error if `split` is not being used', function () {\n      shouldThrow(\n        'Parallel',\n        { parallel: 1 },\n        /You must specify the `split` option in order to run your tests in parallel/,\n      );\n    });\n\n    it('should throw an error if used with `replay-execution`', function () {\n      shouldThrow(\n        'Parallel',\n        { replayExecution: 'abc', parallel: 1 },\n        /You must not use the `replay-execution` option with the `parallel` option./,\n      );\n    });\n\n    it('should throw an error if used with `replay-browser`', function () {\n      shouldThrow(\n        'Parallel',\n        { replayBrowser: 2, parallel: 1 },\n        /You must not use the `replay-browser` option with the `parallel` option./,\n      );\n    });\n\n    it('should throw an error if parallel is > 1 when used with `split`', function () {\n      shouldThrow(\n        'Parallel',\n        { split: 2, parallel: 2 },\n        /When used with `split` or `partition`, `parallel` does not accept a value other than 1./,\n      );\n    });\n\n    it('should throw an error if 0 is passed while loadBalance is specified', function () {\n      shouldThrow(\n        'Parallel',\n        { loadBalance: 2, parallel: 0 },\n        /You must specify a value greater than 1 to `parallel`./,\n      );\n    });\n\n    it('should return true', function () {\n      shouldEqual('Parallel', { split: 2, parallel: 1 }, true);\n    });\n  });\n\n  describe('ShouldWriteExecutionFile', function () {\n    it('should return false when not passed', function () {\n      shouldEqual(\n        'writeExecutionFile',\n        {\n          loadBalance: true,\n          parallel: 2,\n          launch: 'false',\n        },\n        false,\n      );\n    });\n\n    it('should throw an error if `write-execution-file` is used without `load-balance`', function () {\n      shouldThrow(\n        'writeExecutionFile',\n        { 
writeExecutionFile: true },\n        /You must run test suite with the `load-balance` option in order to use the `write-execution-file` option./,\n      );\n    });\n\n    it('should throw an error if `write-execution-file` is used without `load-balance`', function () {\n      shouldThrow(\n        'writeExecutionFile',\n        {\n          split: 2,\n          partition: 1,\n          writeExecutionFile: true,\n        },\n        /You must run test suite with the `load-balance` option in order to use the `write-execution-file` option./,\n      );\n    });\n\n    it('should throw an error if `write-execution-file` is used without `load-balance`', function () {\n      shouldThrow(\n        'writeExecutionFile',\n        {\n          replayExecution: 'test-execution-0000000.json',\n          replayBrowser: [1, 2],\n          writeExecutionFile: true,\n        },\n        /You must run test suite with the `load-balance` option in order to use the `write-execution-file` option./,\n      );\n    });\n\n    it('should throw an error if `write-execution-file` is used with `no-launch`', function () {\n      shouldThrow(\n        'writeExecutionFile',\n        {\n          loadBalance: true,\n          parallel: 1,\n          launch: 'false',\n          writeExecutionFile: true,\n        },\n        /You must not use no-launch with write-execution-file option./,\n      );\n    });\n\n    it('should return true', function () {\n      shouldEqual(\n        'writeExecutionFile',\n        {\n          loadBalance: true,\n          parallel: 2,\n          writeExecutionFile: true,\n        },\n        true,\n      );\n    });\n  });\n\n  describe('shouldLoadBalance', function () {\n    it('should throw an error if ember-cli version is below 3.2.0', function () {\n      shouldThrow(\n        'LoadBalance',\n        { loadBalance: true, replayExecution: 'abc' },\n        /You must be using ember-cli version \\^3.2.0 for this feature to work properly./,\n        '3.0.0',\n      
);\n    });\n\n    it('should throw an error if ember-cli version is ~3.1.0', function () {\n      shouldThrow(\n        'LoadBalance',\n        { loadBalance: true, replayExecution: 'abc' },\n        /You must be using ember-cli version \\^3.2.0 for this feature to work properly./,\n        '~3.1.0',\n      );\n    });\n\n    it('should throw an error if `replayExecution` is passed', function () {\n      shouldThrow(\n        'LoadBalance',\n        { loadBalance: true, replayExecution: 'abc' },\n        /You must not use the `replay-execution` option with the `load-balance` option./,\n      );\n    });\n\n    it('should throw an error if `replayBrowser` is passed', function () {\n      shouldThrow(\n        'LoadBalance',\n        { loadBalance: true, replayBrowser: 3 },\n        /You must not use the `replay-browser` option with the `load-balance` option./,\n      );\n    });\n\n    it('should throw an error if `parallel` is not defined', function () {\n      shouldThrow(\n        'LoadBalance',\n        { loadBalance: true },\n        /You should specify the number of browsers to load-balance against using `parallel` when using `load-balance`./,\n      );\n    });\n\n    it('should throw an error if `parallel` has a value less than 1', function () {\n      shouldThrow(\n        'LoadBalance',\n        { loadBalance: true, parallel: 0 },\n        /You should specify the number of browsers to load-balance against using `parallel` when using `load-balance`./,\n      );\n    });\n\n    it('should throw an error if `no-launch` is passed', function () {\n      shouldThrow(\n        'LoadBalance',\n        { loadBalance: true, parallel: 0, launch: 'false' },\n        /You must not use `no-launch` option with the `load-balance` option./,\n      );\n    });\n\n    it('should return true', function () {\n      shouldEqual('LoadBalance', { loadBalance: true, parallel: 3 }, true);\n    });\n  });\n\n  describe('shouldReplayExecution', function () {\n    before(function () 
{\n      fixturify.writeSync(process.cwd(), {\n        'test-execution-0000000.json': JSON.stringify(TestExecutionJson),\n      });\n    });\n    after(function () {\n      fs.unlink('test-execution-0000000.json');\n    });\n\n    it('should throw an error if `replay-browser` contains a value less than 1', function () {\n      shouldThrow(\n        'ReplayExecution',\n        {\n          replayExecution: 'test-execution-0000000.json',\n          replayBrowser: [1, 0],\n        },\n        /You must specify replay-browser values greater than or equal to 1./,\n      );\n    });\n\n    it('should throw an error if `replay-browser` contains duplicate values', function () {\n      shouldThrow(\n        'ReplayExecution',\n        {\n          replayExecution: 'test-execution-0000000.json',\n          replayBrowser: [1, 2, 1],\n        },\n        /You cannot specify the same replayBrowser value twice. 1 is repeated./,\n      );\n    });\n\n    it('should throw an error if `replay-browser` contains an invalid browser number', function () {\n      shouldThrow(\n        'ReplayExecution',\n        {\n          replayExecution: 'test-execution-0000000.json',\n          replayBrowser: [3, 1],\n        },\n        /You must specify replayBrowser value smaller than a number of browsers in the specified json file./,\n      );\n    });\n\n    it('should throw an error if `no-launch` is used with `replay-execution`.', function () {\n      shouldThrow(\n        'ReplayExecution',\n        {\n          replayExecution: 'test-execution-0000000.json',\n          launch: 'false',\n        },\n        /You must not use `no-launch` option with the `replay-execution` option./,\n      );\n    });\n\n    it('should return true', function () {\n      shouldEqual(\n        'ReplayExecution',\n        {\n          replayExecution: 'test-execution-0000000.json',\n          replayBrowser: [1, 2],\n        },\n        true,\n      );\n    });\n  });\n});\n"
  },
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"ember-exam\",\n  \"version\": \"10.1.0\",\n  \"description\": \"Run your tests with randomization, splitting, and parallelization for beautiful tests.\",\n  \"keywords\": [\n    \"ember-addon\"\n  ],\n  \"homepage\": \"https://ember-cli.github.io/ember-exam\",\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"https://github.com/ember-cli/ember-exam.git\"\n  },\n  \"license\": \"MIT\",\n  \"author\": \"\",\n  \"directories\": {\n    \"doc\": \"doc\",\n    \"test\": \"tests\"\n  },\n  \"scripts\": {\n    \"build\": \"ember build --environment=production\",\n    \"coverage\": \"nyc report --reporter=text-lcov | codeclimate-test-reporter\",\n    \"format\": \"prettier . --write\",\n    \"lint\": \"concurrently \\\"npm:lint:*(!fix)\\\" --names \\\"lint:\\\"\",\n    \"lint:format\": \"prettier . --cache --check\",\n    \"lint:js\": \"eslint . && pnpm --filter './test-apps/**' lint:js\",\n    \"start\": \"ember serve\",\n    \"test\": \"concurrently \\\"npm:test:*\\\"\",\n    \"test:ember\": \"ember test\",\n    \"test:ember-compatibility\": \"ember try:each\",\n    \"test:node\": \"nyc mocha 'node-tests/**/*-test.js'\"\n  },\n  \"nyc\": {\n    \"exclude\": [\n      \"config\",\n      \"node-tests\",\n      \"tests\"\n    ]\n  },\n  \"dependencies\": {\n    \"@babel/core\": \"^7.28.0\",\n    \"chalk\": \"^5.3.0\",\n    \"cli-table3\": \"^0.6.0\",\n    \"debug\": \"^4.2.0\",\n    \"ember-auto-import\": \"^2.7.0\",\n    \"ember-cli-babel\": \"^8.2.0\",\n    \"execa\": \"^8.0.1\",\n    \"fs-extra\": \"^11.2.0\",\n    \"js-yaml\": \"^4.0.0\",\n    \"npmlog\": \"^7.0.0\",\n    \"rimraf\": \"^5.0.0\",\n    \"semver\": \"^7.3.2\",\n    \"silent-error\": \"^1.1.1\"\n  },\n  \"devDependencies\": {\n    \"@babel/plugin-proposal-decorators\": \"7.28.6\",\n    \"@ember/optional-features\": \"2.3.0\",\n    \"@ember/string\": \"3.1.1\",\n    \"@ember/test-helpers\": \"3.3.1\",\n    \"@embroider/test-setup\": \"3.0.3\",\n    \"auto-dist-tag\": 
\"2.1.1\",\n    \"codeclimate-test-reporter\": \"0.5.1\",\n    \"concurrently\": \"9.2.1\",\n    \"ember-cli\": \"5.12.0\",\n    \"ember-cli-dependency-checker\": \"3.3.3\",\n    \"ember-cli-htmlbars\": \"6.3.0\",\n    \"ember-cli-inject-live-reload\": \"2.1.0\",\n    \"ember-concurrency\": \"4.0.6\",\n    \"ember-eslint\": \"0.6.1\",\n    \"ember-load-initializers\": \"2.1.2\",\n    \"ember-qunit\": \"9.0.4\",\n    \"ember-resolver\": \"11.0.1\",\n    \"ember-source\": \"6.10.1\",\n    \"ember-source-channel-url\": \"3.0.0\",\n    \"ember-try\": \"3.0.0\",\n    \"eslint\": \"9.39.4\",\n    \"fixturify\": \"3.0.0\",\n    \"glob\": \"11.1.0\",\n    \"globals\": \"16.5.0\",\n    \"loader.js\": \"4.7.0\",\n    \"mocha\": \"11.7.5\",\n    \"nyc\": \"17.1.0\",\n    \"prettier\": \"3.8.1\",\n    \"prettier-plugin-ember-template-tag\": \"2.1.3\",\n    \"qunit\": \"2.25.0\",\n    \"release-plan\": \"^0.17.4\",\n    \"rsvp\": \"4.8.5\",\n    \"sinon\": \"21.0.3\",\n    \"typescript\": \"5.9.3\",\n    \"webpack\": \"5.104.1\"\n  },\n  \"peerDependencies\": {\n    \"ember-qunit\": \"*\",\n    \"ember-source\": \">= 4.0.0\",\n    \"qunit\": \"*\"\n  },\n  \"packageManager\": \"pnpm@10.33.0\",\n  \"engines\": {\n    \"node\": \">= 18\"\n  },\n  \"volta\": {\n    \"node\": \"18.20.8\"\n  },\n  \"publishConfig\": {\n    \"registry\": \"https://registry.npmjs.org\"\n  },\n  \"pnpm\": {\n    \"overrides\": {\n      \"@glimmer/syntax\": \"^0.95.0\",\n      \"ember-exam\": \"workspace:*\"\n    }\n  },\n  \"ember\": {\n    \"edition\": \"octane\"\n  },\n  \"ember-addon\": {\n    \"configPath\": \"tests/dummy/config\"\n  }\n}\n"
  },
  {
    "path": "pnpm-workspace.yaml",
    "content": "packages:\n  - .\n  - ./docs-app\n  - test-apps/*\n\n## We do not want to auto-install peers because\n## We want to ensure we understand what is actually required\n## So that if a consuming app uses strict mode things will work\n##\n## This said, Apps should probably set this to true\nautoInstallPeers: false\n\n## We use so many similarly grouped peers, we want to make the\n## peer-groups easier to distinguish.\n## This forces a shorter sha for all groups (vs the default of 1000)\npeersSuffixMaxLength: 40\nvirtualStoreDirMaxLength: 40\n\n## If a dependency is not declared, we do not want to accidentally\n## resolve it from the workspace root. This is a common source of\n## bugs in monorepos.\nresolvePeersFromWorkspaceRoot: false\n\n## This also means we do not want to hoist them to the root\n## As this would both expose them to all other packages AND\n## results in them using symlinks instead of hardlinks\nhoistWorkspacePackages: false\n\n\n## Our Workspace Packages are \"injected\" so prevent\n## devDependencies from being exposed and to allow\n## for us to test optional peerDependencies.\ninjectWorkspacePackages: true\n\n## Update injected dependencies when needed.\n## Unfortunately, this does not run after scripts in\n## the monorepo root, so we have added a special \"sync\"\n## script to handle this.\n##\n## NOTE: sync always happens after install (automatically from pnpm).\n##       this script exists so we can manually sync injected deps if we need to\nsyncInjectedDepsAfterScripts:\n- sync\n\n## It may also be good to understand that we intentionally are\n## not using `hoisting` and using `injected` workspace packages\n## to ensure properly isolated dep trees for test apps.\n#\n## things like `moduleExists` from @embroider/macros will report false answers\n## for the test apps unless we avoid hoisting.\n##\n## Note, if we ever need to hoist something, we can use hoist-pattern[]=\"\"\n## For instance: hoist-pattern[]=*node-fetch*\n## to hoist 
the specific thing we need and set this to `true`. When true\n## and a hoist-pattern is present only the hoist-pattern will be hoisted.\nhoist: false\n\n## In keeping with our \"no hoisting\" and \"no auto-peers\" and\n## \"isolated dep trees\", we also want to avoid other things\n## that lead to reliance on hoisting.\n## In general, deduping leads to hoisting. This particular\n## setting causes direct-dependencies to resolve from the\n## workspace root if already in root. We don't want this.\ndedupeDirectDeps: false\n\n## We do not want to dedupe peer dependencies as this\n## results in hoisting and violates optional peer isolation.\ndedupePeerDependents: false\n\n## We do not want to dedupe injected dependencies as this\n## results in hoisting and violates optional peer isolation.\ndedupeInjectedDeps: false\n\noverrides:\n  # Ember-try brings in old dependencies that include a bad set of private / unpublished glimmer stuff\n  '@glimmer/syntax': '^0.94.0'\n"
  },
  {
    "path": "test-apps/broccoli/.editorconfig",
    "content": "# EditorConfig helps developers define and maintain consistent\n# coding styles between different editors and IDEs\n# editorconfig.org\n\nroot = true\n\n[*]\nend_of_line = lf\ncharset = utf-8\ntrim_trailing_whitespace = true\ninsert_final_newline = true\nindent_style = space\nindent_size = 2\n\n[*.hbs]\ninsert_final_newline = false\n\n[*.{diff,md}]\ntrim_trailing_whitespace = false\n"
  },
  {
    "path": "test-apps/broccoli/.ember-cli",
    "content": "{\n  /**\n    Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript\n    rather than JavaScript by default, when a TypeScript version of a given blueprint is available.\n  */\n  \"isTypeScriptProject\": false\n}\n"
  },
  {
    "path": "test-apps/broccoli/.github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    branches:\n      - main\n      - master\n  pull_request: {}\n\nconcurrency:\n  group: ci-${{ github.head_ref || github.ref }}\n  cancel-in-progress: true\n\njobs:\n  lint:\n    name: \"Lint\"\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n\n    steps:\n      - uses: actions/checkout@v6\n      - name: Install Node\n        uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: npm\n      - name: Install Dependencies\n        run: npm ci\n      - name: Lint\n        run: npm run lint\n\n  test:\n    name: \"Test\"\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n\n    steps:\n      - uses: actions/checkout@v6\n      - name: Install Node\n        uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: npm\n      - name: Install Dependencies\n        run: npm ci\n      - name: Run Tests\n        run: npm test\n"
  },
  {
    "path": "test-apps/broccoli/.gitignore",
    "content": "# compiled output\n/dist/\n/declarations/\n\n# dependencies\n/node_modules/\n\n# misc\n/.env*\n/.pnp*\n/.eslintcache\n/coverage/\n/npm-debug.log*\n/testem.log\n/yarn-error.log\n\n# ember-try\n/.node_modules.ember-try/\n/npm-shrinkwrap.json.ember-try\n/package.json.ember-try\n/package-lock.json.ember-try\n/yarn.lock.ember-try\n\n# broccoli-debug\n/DEBUG/\n"
  },
  {
    "path": "test-apps/broccoli/.prettierignore",
    "content": "# unconventional js\n/blueprints/*/files/\n\n# compiled output\n/dist/\n\n# misc\n/coverage/\n!.*\n.*/\n\n# ember-try\n/.node_modules.ember-try/\n"
  },
  {
    "path": "test-apps/broccoli/.prettierrc.js",
    "content": "'use strict';\n\nmodule.exports = {\n  overrides: [\n    {\n      files: '*.{js,ts}',\n      options: {\n        singleQuote: true,\n      },\n    },\n  ],\n};\n"
  },
  {
    "path": "test-apps/broccoli/.stylelintignore",
    "content": "# unconventional files\n/blueprints/*/files/\n\n# compiled output\n/dist/\n\n# addons\n/.node_modules.ember-try/\n"
  },
  {
    "path": "test-apps/broccoli/.stylelintrc.js",
    "content": "'use strict';\n\nmodule.exports = {\n  extends: ['stylelint-config-standard', 'stylelint-prettier/recommended'],\n};\n"
  },
  {
    "path": "test-apps/broccoli/.template-lintrc.js",
    "content": "'use strict';\n\nmodule.exports = {\n  extends: 'recommended',\n};\n"
  },
  {
    "path": "test-apps/broccoli/.watchmanconfig",
    "content": "{\n  \"ignore_dirs\": [\"dist\"]\n}\n"
  },
  {
    "path": "test-apps/broccoli/README.md",
    "content": "# broccoli\n\nThis README outlines the details of collaborating on this Ember application.\nA short introduction of this app could easily go here.\n\n## Prerequisites\n\nYou will need the following things properly installed on your computer.\n\n- [Git](https://git-scm.com/)\n- [Node.js](https://nodejs.org/) (with npm)\n- [Ember CLI](https://cli.emberjs.com/release/)\n- [Google Chrome](https://google.com/chrome/)\n\n## Installation\n\n- `git clone <repository-url>` this repository\n- `cd broccoli`\n- `npm install`\n\n## Running / Development\n\n- `npm run start`\n- Visit your app at [http://localhost:4200](http://localhost:4200).\n- Visit your tests at [http://localhost:4200/tests](http://localhost:4200/tests).\n\n### Code Generators\n\nMake use of the many generators for code, try `ember help generate` for more details\n\n### Running Tests\n\n- `npm run test`\n- `npm run test:ember -- --server`\n\n### Linting\n\n- `npm run lint`\n- `npm run lint:fix`\n\n### Building\n\n- `npm exec ember build` (development)\n- `npm run build` (production)\n\n### Deploying\n\nSpecify what it takes to deploy your app.\n\n## Further Reading / Useful Links\n\n- [ember.js](https://emberjs.com/)\n- [ember-cli](https://cli.emberjs.com/release/)\n- Development Browser Extensions\n  - [ember inspector for chrome](https://chrome.google.com/webstore/detail/ember-inspector/bmdblncegkenkacieihfhpjfppoconhi)\n  - [ember inspector for firefox](https://addons.mozilla.org/en-US/firefox/addon/ember-inspector/)\n"
  },
  {
    "path": "test-apps/broccoli/app/app.js",
    "content": "import Application from '@ember/application';\nimport Resolver from 'ember-resolver';\nimport loadInitializers from 'ember-load-initializers';\nimport config from 'broccoli/config/environment';\n\nexport default class App extends Application {\n  modulePrefix = config.modulePrefix;\n  podModulePrefix = config.podModulePrefix;\n  Resolver = Resolver;\n}\n\nloadInitializers(App, config.modulePrefix);\n"
  },
  {
    "path": "test-apps/broccoli/app/components/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/broccoli/app/controllers/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/broccoli/app/helpers/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/broccoli/app/index.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <title>Broccoli</title>\n    <meta name=\"description\" content=\"\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n\n    {{content-for \"head\"}}\n\n    <link integrity=\"\" rel=\"stylesheet\" href=\"{{rootURL}}assets/vendor.css\">\n    <link integrity=\"\" rel=\"stylesheet\" href=\"{{rootURL}}assets/broccoli.css\">\n\n    {{content-for \"head-footer\"}}\n  </head>\n  <body>\n    {{content-for \"body\"}}\n\n    <script src=\"{{rootURL}}assets/vendor.js\"></script>\n    <script src=\"{{rootURL}}assets/broccoli.js\"></script>\n\n    {{content-for \"body-footer\"}}\n  </body>\n</html>\n"
  },
  {
    "path": "test-apps/broccoli/app/models/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/broccoli/app/router.js",
    "content": "import EmberRouter from '@ember/routing/router';\nimport config from 'broccoli/config/environment';\n\nexport default class Router extends EmberRouter {\n  location = config.locationType;\n  rootURL = config.rootURL;\n}\n\nRouter.map(function () {});\n"
  },
  {
    "path": "test-apps/broccoli/app/routes/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/broccoli/app/styles/app.css",
    "content": "/* Ember supports plain CSS out of the box. More info: https://cli.emberjs.com/release/advanced-use/stylesheets/ */\n"
  },
  {
    "path": "test-apps/broccoli/app/templates/application.hbs",
    "content": "{{page-title \"Broccoli\"}}\n\n{{outlet}}\n\n{{! The following component displays Ember's default welcome message. }}\n<WelcomePage />\n{{! Feel free to remove this! }}"
  },
  {
    "path": "test-apps/broccoli/config/ember-cli-update.json",
    "content": "{\n  \"schemaVersion\": \"1.0.0\",\n  \"packages\": [\n    {\n      \"name\": \"ember-cli\",\n      \"version\": \"5.12.0\",\n      \"blueprints\": [\n        {\n          \"name\": \"app\",\n          \"outputRepo\": \"https://github.com/ember-cli/ember-new-output\",\n          \"codemodsSource\": \"ember-app-codemods-manifest@1\",\n          \"isBaseBlueprint\": true,\n          \"options\": [\n            \"--ci-provider=github\"\n          ]\n        }\n      ]\n    }\n  ]\n}\n"
  },
  {
    "path": "test-apps/broccoli/config/environment.js",
    "content": "'use strict';\n\nmodule.exports = function (environment) {\n  const ENV = {\n    modulePrefix: 'broccoli',\n    environment,\n    rootURL: '/',\n    locationType: 'history',\n    EmberENV: {\n      EXTEND_PROTOTYPES: false,\n      FEATURES: {\n        // Here you can enable experimental features on an ember canary build\n        // e.g. EMBER_NATIVE_DECORATOR_SUPPORT: true\n      },\n    },\n\n    APP: {\n      // Here you can pass flags/options to your application instance\n      // when it is created\n    },\n  };\n\n  if (environment === 'development') {\n    // ENV.APP.LOG_RESOLVER = true;\n    // ENV.APP.LOG_ACTIVE_GENERATION = true;\n    // ENV.APP.LOG_TRANSITIONS = true;\n    // ENV.APP.LOG_TRANSITIONS_INTERNAL = true;\n    // ENV.APP.LOG_VIEW_LOOKUPS = true;\n  }\n\n  if (environment === 'test') {\n    // Testem prefers this...\n    ENV.locationType = 'none';\n\n    // keep test console output quieter\n    ENV.APP.LOG_ACTIVE_GENERATION = false;\n    ENV.APP.LOG_VIEW_LOOKUPS = false;\n\n    ENV.APP.rootElement = '#ember-testing';\n    ENV.APP.autoboot = false;\n  }\n\n  if (environment === 'production') {\n    // here you can enable a production-specific feature\n  }\n\n  return ENV;\n};\n"
  },
  {
    "path": "test-apps/broccoli/config/optional-features.json",
    "content": "{\n  \"application-template-wrapper\": false,\n  \"default-async-observers\": true,\n  \"jquery-integration\": false,\n  \"template-only-glimmer-components\": true,\n  \"no-implicit-route-model\": true\n}\n"
  },
  {
    "path": "test-apps/broccoli/config/targets.js",
    "content": "'use strict';\n\nconst browsers = [\n  'last 1 Chrome versions',\n  'last 1 Firefox versions',\n  'last 1 Safari versions',\n];\n\nmodule.exports = {\n  browsers,\n};\n"
  },
  {
    "path": "test-apps/broccoli/ember-cli-build.js",
    "content": "'use strict';\n\nconst EmberApp = require('ember-cli/lib/broccoli/ember-app');\n\nmodule.exports = function (defaults) {\n  const app = new EmberApp(defaults, {\n    // Add options here\n  });\n\n  return app.toTree();\n};\n"
  },
  {
    "path": "test-apps/broccoli/eslint.config.mjs",
    "content": "import { ember } from 'ember-eslint';\nimport * as url from 'url';\n\n// Needed until Node 20\nconst dirname = url.fileURLToPath(new URL('.', import.meta.url));\n\nexport default [...ember.recommended(dirname)];\n"
  },
  {
    "path": "test-apps/broccoli/package.json",
    "content": "{\n  \"name\": \"broccoli\",\n  \"version\": \"0.0.0\",\n  \"private\": true,\n  \"description\": \"Small description for broccoli goes here\",\n  \"repository\": \"\",\n  \"license\": \"MIT\",\n  \"author\": \"\",\n  \"directories\": {\n    \"doc\": \"doc\",\n    \"test\": \"tests\"\n  },\n  \"scripts\": {\n    \"build\": \"ember build --environment=production\",\n    \"lint\": \"concurrently \\\"npm:lint:*(!fix)\\\" --names \\\"lint:\\\"\",\n    \"lint:fix\": \"concurrently \\\"npm:lint:*:fix\\\" --names \\\"fix:\\\"\",\n    \"lint:hbs\": \"ember-template-lint .\",\n    \"lint:hbs:fix\": \"ember-template-lint . --fix\",\n    \"lint:js\": \"eslint . --cache\",\n    \"lint:js:fix\": \"eslint . --fix\",\n    \"start\": \"ember serve\",\n    \"test\": \"concurrently \\\"npm:lint\\\" \\\"npm:test:*\\\" --names \\\"lint,test:\\\"\",\n    \"test:ember\": \"ember test\"\n  },\n  \"devDependencies\": {\n    \"@babel/core\": \"7.28.6\",\n    \"@babel/plugin-proposal-decorators\": \"7.28.6\",\n    \"@ember/optional-features\": \"2.3.0\",\n    \"@ember/string\": \"4.0.1\",\n    \"@ember/test-helpers\": \"3.3.1\",\n    \"@glimmer/component\": \"1.1.2\",\n    \"@glimmer/tracking\": \"1.1.2\",\n    \"broccoli-asset-rev\": \"3.0.0\",\n    \"concurrently\": \"8.2.2\",\n    \"ember-auto-import\": \"2.12.1\",\n    \"ember-cli\": \"5.12.0\",\n    \"ember-cli-app-version\": \"7.0.0\",\n    \"ember-cli-babel\": \"8.2.0\",\n    \"ember-cli-clean-css\": \"3.0.0\",\n    \"ember-cli-dependency-checker\": \"3.3.3\",\n    \"ember-cli-htmlbars\": \"6.3.0\",\n    \"ember-cli-inject-live-reload\": \"2.1.0\",\n    \"ember-cli-sri\": \"2.1.1\",\n    \"ember-cli-terser\": \"4.0.2\",\n    \"ember-eslint\": \"0.6.1\",\n    \"ember-load-initializers\": \"2.1.2\",\n    \"ember-modifier\": \"4.2.2\",\n    \"ember-page-title\": \"8.2.4\",\n    \"ember-qunit\": \"8.1.1\",\n    \"ember-resolver\": \"12.0.1\",\n    \"ember-source\": \"5.12.0\",\n    \"ember-template-lint\": \"6.1.0\",\n    
\"ember-welcome-page\": \"7.0.2\",\n    \"eslint\": \"9.39.4\",\n    \"loader.js\": \"4.7.0\",\n    \"prettier\": \"3.8.1\",\n    \"qunit\": \"2.25.0\",\n    \"qunit-dom\": \"3.5.0\",\n    \"tracked-built-ins\": \"3.4.0\",\n    \"webpack\": \"5.104.1\"\n  },\n  \"dependencies\": {\n    \"ember-exam\": \"workspace:*\"\n  },\n  \"engines\": {\n    \"node\": \">= 18\"\n  },\n  \"ember\": {\n    \"edition\": \"octane\"\n  }\n}\n"
  },
  {
    "path": "test-apps/broccoli/public/robots.txt",
    "content": "# http://www.robotstxt.org\nUser-agent: *\nDisallow:\n"
  },
  {
    "path": "test-apps/broccoli/testem.js",
    "content": "'use strict';\n\nmodule.exports = {\n  test_page: 'tests/index.html?hidepassed',\n  disable_watching: true,\n  launch_in_ci: ['Chrome'],\n  launch_in_dev: ['Chrome'],\n  browser_start_timeout: 120,\n  browser_args: {\n    Chrome: {\n      ci: [\n        // --no-sandbox is needed when running Chrome inside a container\n        process.env.CI ? '--no-sandbox' : null,\n        '--headless',\n        '--disable-dev-shm-usage',\n        '--disable-software-rasterizer',\n        '--mute-audio',\n        '--remote-debugging-port=0',\n        '--window-size=1440,900',\n      ].filter(Boolean),\n    },\n  },\n};\n"
  },
  {
    "path": "test-apps/broccoli/tests/index.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <title>Broccoli Tests</title>\n    <meta name=\"description\" content=\"\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n\n    {{content-for \"head\"}}\n    {{content-for \"test-head\"}}\n\n    <link rel=\"stylesheet\" href=\"{{rootURL}}assets/vendor.css\">\n    <link rel=\"stylesheet\" href=\"{{rootURL}}assets/broccoli.css\">\n    <link rel=\"stylesheet\" href=\"{{rootURL}}assets/test-support.css\">\n\n    {{content-for \"head-footer\"}}\n    {{content-for \"test-head-footer\"}}\n  </head>\n  <body>\n    {{content-for \"body\"}}\n    {{content-for \"test-body\"}}\n\n    <div id=\"qunit\"></div>\n    <div id=\"qunit-fixture\">\n      <div id=\"ember-testing-container\">\n        <div id=\"ember-testing\"></div>\n      </div>\n    </div>\n\n    <script src=\"/testem.js\" integrity=\"\" data-embroider-ignore></script>\n    <script src=\"{{rootURL}}assets/vendor.js\"></script>\n    <script src=\"{{rootURL}}assets/test-support.js\"></script>\n    <script src=\"{{rootURL}}assets/broccoli.js\"></script>\n    <script src=\"{{rootURL}}assets/tests.js\"></script>\n\n    {{content-for \"body-footer\"}}\n    {{content-for \"test-body-footer\"}}\n  </body>\n</html>\n"
  },
  {
    "path": "test-apps/broccoli/tests/test-helper.js",
    "content": "import Application from 'broccoli/app';\nimport config from 'broccoli/config/environment';\nimport * as QUnit from 'qunit';\nimport { setApplication } from '@ember/test-helpers';\nimport { setup } from 'qunit-dom';\nimport { start } from 'ember-qunit';\n\nsetApplication(Application.create(config.APP));\n\nsetup(QUnit.assert);\n\nstart();\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.editorconfig",
    "content": "# EditorConfig helps developers define and maintain consistent\n# coding styles between different editors and IDEs\n# editorconfig.org\n\nroot = true\n\n[*]\nend_of_line = lf\ncharset = utf-8\ntrim_trailing_whitespace = true\ninsert_final_newline = true\nindent_style = space\nindent_size = 2\n\n[*.hbs]\ninsert_final_newline = false\n\n[*.{diff,md}]\ntrim_trailing_whitespace = false\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.ember-cli",
    "content": "{\n  /**\n    Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript\n    rather than JavaScript by default, when a TypeScript version of a given blueprint is available.\n  */\n  \"isTypeScriptProject\": false,\n\n  /**\n    Setting `componentAuthoringFormat` to \"strict\" will force the blueprint generators to generate GJS\n    or GTS files for the component and the component rendering test. \"loose\" is the default.\n  */\n  \"componentAuthoringFormat\": \"loose\",\n\n  /**\n    Setting `routeAuthoringFormat` to \"strict\" will force the blueprint generators to generate GJS\n    or GTS templates for routes. \"loose\" is the default\n  */\n  \"routeAuthoringFormat\": \"loose\"\n}\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    branches:\n      - main\n      - master\n  pull_request: {}\n\nconcurrency:\n  group: ci-${{ github.head_ref || github.ref }}\n  cancel-in-progress: true\n\njobs:\n  lint:\n    name: \"Lint\"\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n\n    steps:\n      - uses: actions/checkout@v6\n      - name: Install Node\n        uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: npm\n      - name: Install Dependencies\n        run: npm ci\n      - name: Lint\n        run: npm run lint\n\n  test:\n    name: \"Test\"\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n\n    steps:\n      - uses: actions/checkout@v6\n      - name: Install Node\n        uses: actions/setup-node@v6\n        with:\n          node-version: 18\n          cache: npm\n      - name: Install Dependencies\n        run: npm ci\n      - name: Run Tests\n        run: npm test\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.gitignore",
    "content": "# compiled output\n/dist/\n/declarations/\n\n# dependencies\n/node_modules/\n\n# misc\n/.env*\n/.pnp*\n/.eslintcache\n/coverage/\n/npm-debug.log*\n/testem.log\n/yarn-error.log\n\n# broccoli-debug\n/DEBUG/\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.prettierignore",
    "content": "# unconventional js\n/blueprints/*/files/\n\n# compiled output\n/dist/\n\n# misc\n/coverage/\n!.*\n.*/\n/pnpm-lock.yaml\nember-cli-update.json\n*.html\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.prettierrc.js",
    "content": "'use strict';\n\nmodule.exports = {\n  plugins: ['prettier-plugin-ember-template-tag'],\n  overrides: [\n    {\n      files: '*.{js,gjs,ts,gts,mjs,mts,cjs,cts}',\n      options: {\n        singleQuote: true,\n        templateSingleQuote: false,\n      },\n    },\n  ],\n};\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.stylelintignore",
    "content": "# unconventional files\n/blueprints/*/files/\n\n# compiled output\n/dist/\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.stylelintrc.js",
    "content": "'use strict';\n\nmodule.exports = {\n  extends: ['stylelint-config-standard'],\n};\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.template-lintrc.js",
    "content": "'use strict';\n\nmodule.exports = {\n  extends: 'recommended',\n};\n"
  },
  {
    "path": "test-apps/embroider3-webpack/.watchmanconfig",
    "content": "{\n  \"ignore_dirs\": [\"dist\"]\n}\n"
  },
  {
    "path": "test-apps/embroider3-webpack/README.md",
    "content": "# embroider3-webpack\n\nThis README outlines the details of collaborating on this Ember application.\nA short introduction of this app could easily go here.\n\n## Prerequisites\n\nYou will need the following things properly installed on your computer.\n\n- [Git](https://git-scm.com/)\n- [Node.js](https://nodejs.org/) (with npm)\n- [Ember CLI](https://cli.emberjs.com/release/)\n- [Google Chrome](https://google.com/chrome/)\n\n## Installation\n\n- `git clone <repository-url>` this repository\n- `cd embroider3-webpack`\n- `npm install`\n\n## Running / Development\n\n- `npm run start`\n- Visit your app at [http://localhost:4200](http://localhost:4200).\n- Visit your tests at [http://localhost:4200/tests](http://localhost:4200/tests).\n\n### Code Generators\n\nMake use of the many generators for code, try `ember help generate` for more details\n\n### Running Tests\n\n- `npm run test`\n- `npm run test:ember -- --server`\n\n### Linting\n\n- `npm run lint`\n- `npm run lint:fix`\n\n### Building\n\n- `npm exec ember build` (development)\n- `npm run build` (production)\n\n### Deploying\n\nSpecify what it takes to deploy your app.\n\n## Further Reading / Useful Links\n\n- [ember.js](https://emberjs.com/)\n- [ember-cli](https://cli.emberjs.com/release/)\n- Development Browser Extensions\n  - [ember inspector for chrome](https://chrome.google.com/webstore/detail/ember-inspector/bmdblncegkenkacieihfhpjfppoconhi)\n  - [ember inspector for firefox](https://addons.mozilla.org/en-US/firefox/addon/ember-inspector/)\n"
  },
  {
    "path": "test-apps/embroider3-webpack/app/app.js",
    "content": "import Application from '@ember/application';\nimport Resolver from 'ember-resolver';\nimport loadInitializers from 'ember-load-initializers';\nimport config from 'embroider3-webpack/config/environment';\nimport { importSync, isDevelopingApp, macroCondition } from '@embroider/macros';\n\nif (macroCondition(isDevelopingApp())) {\n  importSync('./deprecation-workflow');\n}\n\nexport default class App extends Application {\n  modulePrefix = config.modulePrefix;\n  podModulePrefix = config.podModulePrefix;\n  Resolver = Resolver;\n}\n\nloadInitializers(App, config.modulePrefix);\n"
  },
  {
    "path": "test-apps/embroider3-webpack/app/components/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/embroider3-webpack/app/controllers/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/embroider3-webpack/app/deprecation-workflow.js",
    "content": "import setupDeprecationWorkflow from 'ember-cli-deprecation-workflow';\n\n/**\n * Docs: https://github.com/ember-cli/ember-cli-deprecation-workflow\n */\nsetupDeprecationWorkflow({\n  /**\n    false by default, but if a developer / team wants to be more aggressive about being proactive with\n    handling their deprecations, this should be set to \"true\"\n  */\n  throwOnUnhandled: false,\n  workflow: [\n    /* ... handlers ... */\n    /* to generate this list, run your app for a while (or run the test suite),\n     * and then run in the browser console:\n     *\n     *    deprecationWorkflow.flushDeprecations()\n     *\n     * And copy the handlers here\n     */\n    /* example: */\n    /* { handler: 'silence', matchId: 'template-action' }, */\n  ],\n});\n"
  },
  {
    "path": "test-apps/embroider3-webpack/app/helpers/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/embroider3-webpack/app/index.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <title>Embroider3Webpack</title>\n    <meta name=\"description\" content=\"\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n\n    {{content-for \"head\"}}\n\n    <link integrity=\"\" rel=\"stylesheet\" href=\"{{rootURL}}assets/vendor.css\">\n    <link integrity=\"\" rel=\"stylesheet\" href=\"{{rootURL}}assets/embroider3-webpack.css\">\n\n    {{content-for \"head-footer\"}}\n  </head>\n  <body>\n    {{content-for \"body\"}}\n\n    <script src=\"{{rootURL}}assets/vendor.js\"></script>\n    <script src=\"{{rootURL}}assets/embroider3-webpack.js\"></script>\n\n    {{content-for \"body-footer\"}}\n  </body>\n</html>\n"
  },
  {
    "path": "test-apps/embroider3-webpack/app/models/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/embroider3-webpack/app/router.js",
    "content": "import EmberRouter from '@ember/routing/router';\nimport config from 'embroider3-webpack/config/environment';\n\nexport default class Router extends EmberRouter {\n  location = config.locationType;\n  rootURL = config.rootURL;\n}\n\nRouter.map(function () {});\n"
  },
  {
    "path": "test-apps/embroider3-webpack/app/routes/.gitkeep",
    "content": ""
  },
  {
    "path": "test-apps/embroider3-webpack/app/styles/app.css",
    "content": "/* Ember supports plain CSS out of the box. More info: https://cli.emberjs.com/release/advanced-use/stylesheets/ */\n"
  },
  {
    "path": "test-apps/embroider3-webpack/app/templates/application.hbs",
    "content": "{{page-title \"Embroider3Webpack\"}}\n\n{{outlet}}\n\n{{! The following component displays Ember's default welcome message. }}\n<WelcomePage />\n{{! Feel free to remove this! }}"
  },
  {
    "path": "test-apps/embroider3-webpack/config/ember-cli-update.json",
    "content": "{\n  \"schemaVersion\": \"1.0.0\",\n  \"packages\": [\n    {\n      \"name\": \"ember-cli\",\n      \"version\": \"6.6.0\",\n      \"blueprints\": [\n        {\n          \"name\": \"app\",\n          \"outputRepo\": \"https://github.com/ember-cli/ember-new-output\",\n          \"codemodsSource\": \"ember-app-codemods-manifest@1\",\n          \"isBaseBlueprint\": true,\n          \"options\": [\n            \"--embroider\",\n            \"--ci-provider=github\"\n          ]\n        }\n      ]\n    }\n  ]\n}\n"
  },
  {
    "path": "test-apps/embroider3-webpack/config/environment.js",
    "content": "'use strict';\n\nmodule.exports = function (environment) {\n  const ENV = {\n    modulePrefix: 'embroider3-webpack',\n    environment,\n    rootURL: '/',\n    locationType: 'history',\n    EmberENV: {\n      EXTEND_PROTOTYPES: false,\n      FEATURES: {\n        // Here you can enable experimental features on an ember canary build\n        // e.g. EMBER_NATIVE_DECORATOR_SUPPORT: true\n      },\n    },\n\n    APP: {\n      // Here you can pass flags/options to your application instance\n      // when it is created\n    },\n  };\n\n  if (environment === 'development') {\n    // ENV.APP.LOG_RESOLVER = true;\n    // ENV.APP.LOG_ACTIVE_GENERATION = true;\n    // ENV.APP.LOG_TRANSITIONS = true;\n    // ENV.APP.LOG_TRANSITIONS_INTERNAL = true;\n    // ENV.APP.LOG_VIEW_LOOKUPS = true;\n  }\n\n  if (environment === 'test') {\n    // Testem prefers this...\n    ENV.locationType = 'none';\n\n    // keep test console output quieter\n    ENV.APP.LOG_ACTIVE_GENERATION = false;\n    ENV.APP.LOG_VIEW_LOOKUPS = false;\n\n    ENV.APP.rootElement = '#ember-testing';\n    ENV.APP.autoboot = false;\n  }\n\n  if (environment === 'production') {\n    // here you can enable a production-specific feature\n  }\n\n  return ENV;\n};\n"
  },
  {
    "path": "test-apps/embroider3-webpack/config/optional-features.json",
    "content": "{\n  \"application-template-wrapper\": false,\n  \"default-async-observers\": true,\n  \"jquery-integration\": false,\n  \"template-only-glimmer-components\": true,\n  \"no-implicit-route-model\": true\n}\n"
  },
  {
    "path": "test-apps/embroider3-webpack/config/targets.js",
    "content": "'use strict';\n\nconst browsers = [\n  'last 1 Chrome versions',\n  'last 1 Firefox versions',\n  'last 1 Safari versions',\n];\n\nmodule.exports = {\n  browsers,\n};\n"
  },
  {
    "path": "test-apps/embroider3-webpack/ember-cli-build.js",
    "content": "'use strict';\n\nconst EmberApp = require('ember-cli/lib/broccoli/ember-app');\n\nmodule.exports = function (defaults) {\n  const app = new EmberApp(defaults, {\n    emberData: {\n      deprecations: {\n        // New projects can safely leave this deprecation disabled.\n        // If upgrading, to opt-into the deprecated behavior, set this to true and then follow:\n        // https://deprecations.emberjs.com/id/ember-data-deprecate-store-extends-ember-object\n        // before upgrading to Ember Data 6.0\n        DEPRECATE_STORE_EXTENDS_EMBER_OBJECT: false,\n      },\n    },\n    // Add options here\n  });\n\n  const { Webpack } = require('@embroider/webpack');\n  return require('@embroider/compat').compatBuild(app, Webpack, {\n    staticAddonTestSupportTrees: true,\n    staticAddonTrees: true,\n    staticEmberSource: true,\n    staticInvokables: true,\n    skipBabel: [\n      {\n        package: 'qunit',\n      },\n    ],\n  });\n};\n"
  },
  {
    "path": "test-apps/embroider3-webpack/eslint.config.mjs",
    "content": "import { ember } from 'ember-eslint';\nimport * as url from 'url';\n\n// Needed until Node 20\nconst dirname = url.fileURLToPath(new URL('.', import.meta.url));\n\nexport default [...ember.recommended(dirname)];\n"
  },
  {
    "path": "test-apps/embroider3-webpack/package.json",
    "content": "{\n  \"name\": \"embroider3-webpack\",\n  \"version\": \"0.0.0\",\n  \"private\": true,\n  \"description\": \"Small description for embroider3-webpack goes here\",\n  \"repository\": \"\",\n  \"license\": \"MIT\",\n  \"author\": \"\",\n  \"directories\": {\n    \"doc\": \"doc\",\n    \"test\": \"tests\"\n  },\n  \"scripts\": {\n    \"build\": \"ember build --environment=production\",\n    \"format\": \"prettier . --cache --write\",\n    \"lint\": \"concurrently \\\"npm:lint:*(!fix)\\\" --names \\\"lint:\\\" --prefixColors auto\",\n    \"lint:fix\": \"concurrently \\\"npm:lint:*:fix\\\" --names \\\"fix:\\\" --prefixColors auto && npm run format\",\n    \"lint:format\": \"prettier . --cache --check\",\n    \"lint:js\": \"eslint . --cache\",\n    \"lint:js:fix\": \"eslint . --fix\",\n    \"start\": \"ember serve\",\n    \"test\": \"concurrently \\\"npm:lint\\\" \\\"npm:test:*\\\" --names \\\"lint,test:\\\" --prefixColors auto\",\n    \"test:ember\": \"ember test\"\n  },\n  \"devDependencies\": {\n    \"@babel/core\": \"7.28.6\",\n    \"@babel/plugin-proposal-decorators\": \"7.28.6\",\n    \"@ember/optional-features\": \"2.3.0\",\n    \"@ember/test-helpers\": \"5.4.1\",\n    \"@embroider/compat\": \"3.9.3\",\n    \"@embroider/core\": \"3.5.9\",\n    \"@embroider/macros\": \"1.19.7\",\n    \"@embroider/webpack\": \"4.1.2\",\n    \"@glimmer/component\": \"2.0.0\",\n    \"@glimmer/tracking\": \"1.1.2\",\n    \"broccoli-asset-rev\": \"3.0.0\",\n    \"concurrently\": \"9.2.1\",\n    \"ember-auto-import\": \"2.12.1\",\n    \"ember-cli\": \"6.9.1\",\n    \"ember-cli-app-version\": \"7.0.0\",\n    \"ember-cli-babel\": \"8.2.0\",\n    \"ember-cli-clean-css\": \"3.0.0\",\n    \"ember-cli-dependency-checker\": \"3.3.3\",\n    \"ember-cli-deprecation-workflow\": \"3.4.0\",\n    \"ember-cli-htmlbars\": \"6.3.0\",\n    \"ember-cli-inject-live-reload\": \"2.1.0\",\n    \"ember-eslint\": \"0.6.1\",\n    \"ember-load-initializers\": \"3.0.1\",\n    \"ember-modifier\": 
\"4.2.2\",\n    \"ember-page-title\": \"9.0.3\",\n    \"ember-qunit\": \"9.0.4\",\n    \"ember-resolver\": \"13.1.1\",\n    \"ember-source\": \"6.10.1\",\n    \"ember-template-imports\": \"4.4.0\",\n    \"ember-welcome-page\": \"7.0.2\",\n    \"eslint\": \"9.39.4\",\n    \"loader.js\": \"4.7.0\",\n    \"prettier\": \"3.8.1\",\n    \"prettier-plugin-ember-template-tag\": \"2.1.3\",\n    \"qunit\": \"2.25.0\",\n    \"qunit-dom\": \"3.5.0\",\n    \"tracked-built-ins\": \"3.4.0\",\n    \"webpack\": \"5.104.1\"\n  },\n  \"dependencies\": {\n    \"ember-exam\": \"workspace:*\"\n  },\n  \"engines\": {\n    \"node\": \">= 20.11\"\n  },\n  \"ember\": {\n    \"edition\": \"octane\"\n  }\n}\n"
  },
  {
    "path": "test-apps/embroider3-webpack/public/robots.txt",
    "content": "# http://www.robotstxt.org\nUser-agent: *\nDisallow:\n"
  },
  {
    "path": "test-apps/embroider3-webpack/testem.js",
    "content": "'use strict';\n\nmodule.exports = {\n  test_page: 'tests/index.html?hidepassed',\n  disable_watching: true,\n  launch_in_ci: ['Chrome'],\n  launch_in_dev: ['Chrome'],\n  browser_start_timeout: 120,\n  browser_args: {\n    Chrome: {\n      ci: [\n        // --no-sandbox is needed when running Chrome inside a container\n        process.env.CI ? '--no-sandbox' : null,\n        '--headless',\n        '--disable-dev-shm-usage',\n        '--disable-software-rasterizer',\n        '--mute-audio',\n        '--remote-debugging-port=0',\n        '--window-size=1440,900',\n      ].filter(Boolean),\n    },\n  },\n};\n"
  },
  {
    "path": "test-apps/embroider3-webpack/tests/index.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <title>Embroider3Webpack Tests</title>\n    <meta name=\"description\" content=\"\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n\n    {{content-for \"head\"}}\n    {{content-for \"test-head\"}}\n\n    <link rel=\"stylesheet\" href=\"{{rootURL}}assets/vendor.css\">\n    <link rel=\"stylesheet\" href=\"{{rootURL}}assets/embroider3-webpack.css\">\n    <link rel=\"stylesheet\" href=\"{{rootURL}}assets/test-support.css\">\n\n    {{content-for \"head-footer\"}}\n    {{content-for \"test-head-footer\"}}\n  </head>\n  <body>\n    {{content-for \"body\"}}\n    {{content-for \"test-body\"}}\n\n    <div id=\"qunit\"></div>\n    <div id=\"qunit-fixture\">\n      <div id=\"ember-testing-container\">\n        <div id=\"ember-testing\"></div>\n      </div>\n    </div>\n\n    <script src=\"/testem.js\" integrity=\"\" data-embroider-ignore></script>\n    <script src=\"{{rootURL}}assets/vendor.js\"></script>\n    <script src=\"{{rootURL}}assets/test-support.js\"></script>\n    <script src=\"{{rootURL}}assets/embroider3-webpack.js\"></script>\n    <script src=\"{{rootURL}}assets/tests.js\"></script>\n\n    {{content-for \"body-footer\"}}\n    {{content-for \"test-body-footer\"}}\n  </body>\n</html>\n"
  },
  {
    "path": "test-apps/embroider3-webpack/tests/test-helper.js",
    "content": "import Application from 'embroider3-webpack/app';\nimport config from 'embroider3-webpack/config/environment';\nimport { setApplication } from '@ember/test-helpers';\nimport { setupEmberOnerrorValidation } from 'ember-qunit';\nimport { start as startEmberExam } from 'ember-exam/test-support';\n\nsetApplication(Application.create(config.APP));\nsetupEmberOnerrorValidation();\nstartEmberExam();\n"
  },
  {
    "path": "test-apps/vite-with-compat/.editorconfig",
    "content": "# EditorConfig helps developers define and maintain consistent\n# coding styles between different editors and IDEs\n# editorconfig.org\n\nroot = true\n\n[*]\nend_of_line = lf\ncharset = utf-8\ntrim_trailing_whitespace = true\ninsert_final_newline = true\nindent_style = space\nindent_size = 2\n\n[*.hbs]\ninsert_final_newline = false\n\n[*.{diff,md}]\ntrim_trailing_whitespace = false\n"
  },
  {
    "path": "test-apps/vite-with-compat/.ember-cli",
    "content": "{\n  /**\n    Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript\n    rather than JavaScript by default, when a TypeScript version of a given blueprint is available.\n  */\n  \"isTypeScriptProject\": false,\n\n  /**\n    Setting `componentAuthoringFormat` to \"strict\" will force the blueprint generators to generate GJS\n    or GTS files for the component and the component rendering test. \"loose\" is the default.\n  */\n  \"componentAuthoringFormat\": \"strict\",\n\n  /**\n    Setting `routeAuthoringFormat` to \"strict\" will force the blueprint generators to generate GJS\n    or GTS templates for routes. \"loose\" is the default\n  */\n  \"routeAuthoringFormat\": \"strict\"\n}\n"
  },
  {
    "path": "test-apps/vite-with-compat/.gitignore",
    "content": "# compiled output\n/dist/\n/declarations/\n/tmp/\n\n# dependencies\n/node_modules/\n\n# misc\n*.local\n/.pnp*\n/.eslintcache\n/coverage/\n/npm-debug.log*\n/testem.log\n/yarn-error.log\n\n# ember-try\n/.node_modules.ember-try/\n/npm-shrinkwrap.json.ember-try\n/package.json.ember-try\n/package-lock.json.ember-try\n/yarn.lock.ember-try\n\n# broccoli-debug\n/DEBUG/\n"
  },
  {
    "path": "test-apps/vite-with-compat/.prettierignore",
    "content": "# unconventional js\n/blueprints/*/files/\n\n# compiled output\n/dist/\n\n# misc\n/coverage/\n!.*\n.*/\n/pnpm-lock.yaml\nember-cli-update.json\n*.html\n"
  },
  {
    "path": "test-apps/vite-with-compat/.prettierrc.mjs",
    "content": "export default {\n  plugins: ['prettier-plugin-ember-template-tag'],\n  singleQuote: true,\n  overrides: [\n    {\n      files: ['*.js', '*.ts', '*.cjs', '.mjs', '.cts', '.mts', '.cts'],\n      options: {\n        trailingComma: 'es5',\n      },\n    },\n    {\n      files: ['*.html'],\n      options: {\n        singleQuote: false,\n      },\n    },\n    {\n      files: ['*.json'],\n      options: {\n        singleQuote: false,\n      },\n    },\n    {\n      files: ['*.hbs'],\n      options: {\n        singleQuote: false,\n      },\n    },\n    {\n      files: ['*.gjs', '*.gts'],\n      options: {\n        templateSingleQuote: false,\n        trailingComma: 'es5',\n      },\n    },\n  ],\n};\n"
  },
  {
    "path": "test-apps/vite-with-compat/.template-lintrc.mjs",
    "content": "export default {\n  extends: 'recommended',\n};\n"
  },
  {
    "path": "test-apps/vite-with-compat/.watchmanconfig",
    "content": "{\n  \"ignore_dirs\": [\"dist\"]\n}\n"
  },
  {
    "path": "test-apps/vite-with-compat/README.md",
    "content": "# vite-with-compat\n\nThis README outlines the details of collaborating on this Ember application.\nA short introduction of this app could easily go here.\n\n## Prerequisites\n\nYou will need the following things properly installed on your computer.\n\n- [Git](https://git-scm.com/)\n- [Node.js](https://nodejs.org/)\n- [pnpm](https://pnpm.io/)\n- [Ember CLI](https://cli.emberjs.com/release/)\n- [Google Chrome](https://google.com/chrome/)\n\n## Installation\n\n- `git clone <repository-url>` this repository\n- `cd vite-with-compat`\n- `pnpm install`\n\n## Running / Development\n\n- `pnpm start`\n- Visit your app at [http://localhost:4200](http://localhost:4200).\n- Visit your tests at [http://localhost:4200/tests](http://localhost:4200/tests).\n\n### Code Generators\n\nMake use of the many generators for code, try `ember help generate` for more details\n\n### Running Tests\n\n- `pnpm test`\n- `pnpm test --server`\n\n### Linting\n\n- `pnpm lint`\n- `pnpm lint:fix`\n\n### Building\n\n- `pnpm ember build` (development)\n- `pnpm build` (production)\n\n### Deploying\n\nSpecify what it takes to deploy your app.\n\n## Further Reading / Useful Links\n\n- [ember.js](https://emberjs.com/)\n- [ember-cli](https://cli.emberjs.com/release/)\n- Development Browser Extensions\n  - [ember inspector for chrome](https://chrome.google.com/webstore/detail/ember-inspector/bmdblncegkenkacieihfhpjfppoconhi)\n  - [ember inspector for firefox](https://addons.mozilla.org/en-US/firefox/addon/ember-inspector/)\n"
  },
  {
    "path": "test-apps/vite-with-compat/app/app.js",
    "content": "import Application from '@ember/application';\nimport compatModules from '@embroider/virtual/compat-modules';\nimport Resolver from 'ember-resolver';\nimport config from 'vite-with-compat/config/environment';\n\nexport default class App extends Application {\n  modulePrefix = config.modulePrefix;\n  podModulePrefix = config.podModulePrefix;\n  Resolver = Resolver.withModules(compatModules);\n}\n"
  },
  {
    "path": "test-apps/vite-with-compat/app/config/environment.js",
    "content": "import loadConfigFromMeta from '@embroider/config-meta-loader';\nimport { assert } from '@ember/debug';\n\nconst config = loadConfigFromMeta('vite-with-compat');\n\nassert(\n  'config is not an object',\n  typeof config === 'object' && config !== null\n);\nassert(\n  'modulePrefix was not detected on your config',\n  'modulePrefix' in config && typeof config.modulePrefix === 'string'\n);\nassert(\n  'locationType was not detected on your config',\n  'locationType' in config && typeof config.locationType === 'string'\n);\nassert(\n  'rootURL was not detected on your config',\n  'rootURL' in config && typeof config.rootURL === 'string'\n);\nassert(\n  'APP was not detected on your config',\n  'APP' in config && typeof config.APP === 'object'\n);\n\nexport default config;\n"
  },
  {
    "path": "test-apps/vite-with-compat/app/router.js",
    "content": "import EmberRouter from '@ember/routing/router';\nimport config from 'vite-with-compat/config/environment';\n\nexport default class Router extends EmberRouter {\n  location = config.locationType;\n  rootURL = config.rootURL;\n}\n\nRouter.map(function () {});\n"
  },
  {
    "path": "test-apps/vite-with-compat/babel.config.cjs",
    "content": "const {\n  babelCompatSupport,\n  templateCompatSupport,\n} = require('@embroider/compat/babel');\n\nmodule.exports = {\n  plugins: [\n    [\n      'babel-plugin-ember-template-compilation',\n      {\n        compilerPath: 'ember-source/dist/ember-template-compiler.js',\n        enableLegacyModules: [\n          'ember-cli-htmlbars',\n          'ember-cli-htmlbars-inline-precompile',\n          'htmlbars-inline-precompile',\n        ],\n        transforms: [...templateCompatSupport()],\n      },\n    ],\n    [\n      'module:decorator-transforms',\n      {\n        runtime: {\n          import: require.resolve('decorator-transforms/runtime-esm'),\n        },\n      },\n    ],\n    [\n      '@babel/plugin-transform-runtime',\n      {\n        absoluteRuntime: __dirname,\n        useESModules: true,\n        regenerator: false,\n      },\n    ],\n    ...babelCompatSupport(),\n  ],\n\n  generatorOpts: {\n    compact: false,\n  },\n};\n"
  },
  {
    "path": "test-apps/vite-with-compat/config/ember-cli-update.json",
    "content": "{\n  \"schemaVersion\": \"1.0.0\",\n  \"packages\": [\n    {\n      \"name\": \"@ember/app-blueprint\",\n      \"version\": \"0.8.1\",\n      \"blueprints\": [\n        {\n          \"name\": \"@ember/app-blueprint\",\n          \"isBaseBlueprint\": true,\n          \"options\": [\n            \"--pnpm\",\n            \"--ci-provider=none\"\n          ]\n        }\n      ]\n    }\n  ]\n}\n"
  },
  {
    "path": "test-apps/vite-with-compat/config/environment.js",
    "content": "'use strict';\n\nmodule.exports = function (environment) {\n  const ENV = {\n    modulePrefix: 'vite-with-compat',\n    environment,\n    rootURL: '/',\n    locationType: 'history',\n    EmberENV: {\n      EXTEND_PROTOTYPES: false,\n      FEATURES: {\n        // Here you can enable experimental features on an ember canary build\n        // e.g. EMBER_NATIVE_DECORATOR_SUPPORT: true\n      },\n    },\n\n    APP: {\n      // Here you can pass flags/options to your application instance\n      // when it is created\n    },\n  };\n\n  if (environment === 'development') {\n    // ENV.APP.LOG_RESOLVER = true;\n    // ENV.APP.LOG_ACTIVE_GENERATION = true;\n    // ENV.APP.LOG_TRANSITIONS = true;\n    // ENV.APP.LOG_TRANSITIONS_INTERNAL = true;\n    // ENV.APP.LOG_VIEW_LOOKUPS = true;\n  }\n\n  if (environment === 'test') {\n    // Testem prefers this...\n    ENV.locationType = 'none';\n\n    // keep test console output quieter\n    ENV.APP.LOG_ACTIVE_GENERATION = false;\n    ENV.APP.LOG_VIEW_LOOKUPS = false;\n\n    ENV.APP.rootElement = '#ember-testing';\n    ENV.APP.autoboot = false;\n  }\n\n  if (environment === 'production') {\n    // here you can enable a production-specific feature\n  }\n\n  return ENV;\n};\n"
  },
  {
    "path": "test-apps/vite-with-compat/config/optional-features.json",
    "content": "{\n  \"application-template-wrapper\": false,\n  \"default-async-observers\": true,\n  \"jquery-integration\": false,\n  \"template-only-glimmer-components\": true,\n  \"no-implicit-route-model\": true\n}\n"
  },
  {
    "path": "test-apps/vite-with-compat/config/targets.js",
    "content": "'use strict';\n\nconst browsers = [\n  'last 1 Chrome versions',\n  'last 1 Firefox versions',\n  'last 1 Safari versions',\n];\n\nmodule.exports = {\n  browsers,\n};\n"
  },
  {
    "path": "test-apps/vite-with-compat/ember-cli-build.js",
    "content": "'use strict';\n\nconst EmberApp = require('ember-cli/lib/broccoli/ember-app');\nconst { compatBuild } = require('@embroider/compat');\n\nmodule.exports = async function (defaults) {\n  const { buildOnce } = await import('@embroider/vite');\n  let app = new EmberApp(defaults, {\n    emberData: {\n      deprecations: {\n        // New projects can safely leave this deprecation disabled.\n        // If upgrading, to opt-into the deprecated behavior, set this to true and then follow:\n        // https://deprecations.emberjs.com/id/ember-data-deprecate-store-extends-ember-object\n        // before upgrading to Ember Data 6.0\n        DEPRECATE_STORE_EXTENDS_EMBER_OBJECT: false,\n      },\n    },\n    // Add options here\n  });\n\n  return compatBuild(app, buildOnce);\n};\n"
  },
  {
    "path": "test-apps/vite-with-compat/eslint.config.mjs",
    "content": "import { ember } from 'ember-eslint';\nimport * as url from 'url';\n\n// Needed until Node 20\nconst dirname = url.fileURLToPath(new URL('.', import.meta.url));\n\nexport default [...ember.recommended(dirname)];\n"
  },
  {
    "path": "test-apps/vite-with-compat/index.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <script>\n      window.location.href = '/tests/';\n    </script>\n  </head>\n  <body></body>\n</html>\n"
  },
  {
    "path": "test-apps/vite-with-compat/package.json",
    "content": "{\n  \"name\": \"vite-with-compat\",\n  \"version\": \"0.0.0\",\n  \"private\": true,\n  \"description\": \"Small description for vite-with-compat goes here\",\n  \"repository\": \"\",\n  \"license\": \"MIT\",\n  \"author\": \"\",\n  \"directories\": {\n    \"doc\": \"doc\",\n    \"test\": \"tests\"\n  },\n  \"scripts\": {\n    \":exam\": \"ember exam --config-file ./testem.cjs --path dist --split 2 --parallel 1\",\n    \"start\": \"vite\",\n    \"start:vite-preview\": \"vite preview\",\n    \"start:ember-server\": \"ember test --config-file ./testem.cjs --path dist --serve --no-launch\",\n    \"start:exam\": \"pnpm :exam server\",\n    \"build\": \"vite build\",\n    \"build:tests\": \"vite build --mode development\",\n    \"format\": \"prettier . --cache --write\",\n    \"lint\": \"concurrently \\\"pnpm:lint:*(!fix)\\\" --names \\\"lint:\\\" --prefixColors auto\",\n    \"lint:fix\": \"concurrently \\\"pnpm:lint:*:fix\\\" --names \\\"fix:\\\" --prefixColors auto && pnpm format\",\n    \"lint:format\": \"prettier . --cache --check\",\n    \"lint:hbs\": \"ember-template-lint .\",\n    \"lint:hbs:fix\": \"ember-template-lint . --fix\",\n    \"lint:js\": \"eslint . --cache\",\n    \"lint:js:fix\": \"eslint . --fix\",\n    \"sync\": \"echo 'pnpm will sync injected dependencies. 
See pnpm-workspace.yaml'\",\n    \"test:normal\": \"testem ci\",\n    \"test:ember\": \"pnpm build:tests && pnpm test:normal\",\n    \"test:exam\": \"pnpm :exam --random\"\n  },\n  \"exports\": {\n    \"./tests/*\": \"./tests/*\",\n    \"./*\": \"./app/*\"\n  },\n  \"dependencies\": {\n    \"ember-exam\": \"workspace:*\"\n  },\n  \"devDependencies\": {\n    \"@babel/core\": \"7.28.6\",\n    \"@babel/plugin-transform-runtime\": \"7.28.5\",\n    \"@babel/runtime\": \"7.28.6\",\n    \"@ember/optional-features\": \"2.3.0\",\n    \"@ember/string\": \"4.0.1\",\n    \"@ember/test-helpers\": \"5.4.1\",\n    \"@ember/test-waiters\": \"4.1.1\",\n    \"@embroider/compat\": \"4.1.17\",\n    \"@embroider/config-meta-loader\": \"1.0.0\",\n    \"@embroider/core\": \"4.4.7\",\n    \"@embroider/macros\": \"1.19.7\",\n    \"@embroider/vite\": \"1.5.2\",\n    \"@glimmer/component\": \"2.0.0\",\n    \"@rollup/plugin-babel\": \"6.1.0\",\n    \"babel-plugin-ember-template-compilation\": \"2.4.1\",\n    \"concurrently\": \"9.2.1\",\n    \"decorator-transforms\": \"2.3.1\",\n    \"ember-auto-import\": \"2.12.1\",\n    \"ember-cli\": \"6.9.1\",\n    \"ember-cli-babel\": \"8.2.0\",\n    \"ember-eslint\": \"0.6.1\",\n    \"ember-qunit\": \"9.0.4\",\n    \"ember-resolver\": \"13.1.1\",\n    \"ember-source\": \"6.10.1\",\n    \"ember-template-lint\": \"7.9.3\",\n    \"eslint\": \"9.39.4\",\n    \"prettier\": \"3.8.1\",\n    \"prettier-plugin-ember-template-tag\": \"2.1.3\",\n    \"qunit\": \"2.25.0\",\n    \"qunit-dom\": \"3.5.0\",\n    \"testem\": \"3.17.0\",\n    \"vite\": \"7.3.2\"\n  },\n  \"engines\": {\n    \"node\": \">= 18\"\n  },\n  \"ember\": {\n    \"edition\": \"octane\"\n  }\n}\n"
  },
  {
    "path": "test-apps/vite-with-compat/public/robots.txt",
    "content": "# http://www.robotstxt.org\nUser-agent: *\nDisallow:\n"
  },
  {
    "path": "test-apps/vite-with-compat/testem.cjs",
    "content": "'use strict';\n\nif (typeof module !== 'undefined') {\n  module.exports = {\n    test_page: 'tests/index.html?hidepassed',\n    cwd: process.env.TESTEM_DIR ?? 'dist',\n    disable_watching: true,\n    launch_in_ci: ['Chrome'],\n    launch_in_dev: ['Chrome'],\n    browser_start_timeout: 120,\n    browser_disconnect_timeout: 30,\n    browser_args: {\n      Chrome: {\n        ci: [\n          // --no-sandbox is needed when running Chrome inside a container\n          process.env.CI ? '--no-sandbox' : null,\n          '--headless',\n          '--disable-dev-shm-usage',\n          '--disable-software-rasterizer',\n          '--mute-audio',\n          '--remote-debugging-port=0',\n          '--window-size=1440,900',\n        ].filter(Boolean),\n      },\n    },\n  };\n}\n"
  },
  {
    "path": "test-apps/vite-with-compat/tests/index.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <title>ViteWithCompat Tests</title>\n    <meta name=\"description\" content=\"\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n\n    {{content-for \"head\"}}\n    {{content-for \"test-head\"}}\n\n    <link rel=\"stylesheet\" href=\"/@embroider/virtual/vendor.css\">\n    <link rel=\"stylesheet\" href=\"/@embroider/virtual/test-support.css\">\n\n    {{content-for \"head-footer\"}}\n    {{content-for \"test-head-footer\"}}\n  </head>\n  <body>\n    {{content-for \"body\"}}\n    {{content-for \"test-body\"}}\n\n    <div id=\"qunit\"></div>\n    <div id=\"qunit-fixture\">\n      <div id=\"ember-testing-container\">\n        <div id=\"ember-testing\"></div>\n      </div>\n    </div>\n\n    <script src=\"/testem.js\" integrity=\"\" data-embroider-ignore></script>\n    <script src=\"/@embroider/virtual/vendor.js\"></script>\n    <script src=\"/@embroider/virtual/test-support.js\"></script>\n    <script type=\"module\">import \"ember-testing\";</script>\n\n    <script type=\"module\">\n      import { start } from './test-helper.js';\n\n      const availableModules = {\n        ...import.meta.glob(\"./**/*-test.{js,ts,gjs,gts}\")\n      };\n\n      start({ availableModules });\n    </script>\n\n    {{content-for \"body-footer\"}}\n  </body>\n</html>\n"
  },
  {
    "path": "test-apps/vite-with-compat/tests/integration/a-test.gjs",
    "content": "import { module, test } from 'qunit';\nimport { setupRenderingTest } from 'ember-qunit';\nimport { render } from '@ember/test-helpers';\n\nconsole.log('Suite A is evaluated');\n\nmodule('Suite A', function (hooks) {\n  setupRenderingTest(hooks);\n\n  test('a', async function (assert) {\n    await render(<template>a</template>);\n\n    assert.dom().hasText('a');\n  });\n\n  test('b', async function (assert) {\n    await new Promise(resolve => setTimeout(resolve, 2_000));\n    await render(<template>b</template>);\n\n    assert.dom().hasText('b');\n  });\n\n  test('c', async function (assert) {\n    await render(<template>c</template>);\n\n    assert.dom().hasText('c');\n  });\n});\n"
  },
  {
    "path": "test-apps/vite-with-compat/tests/integration/b-test.gjs",
    "content": "import { module, test } from 'qunit';\nimport { setupRenderingTest } from 'ember-qunit';\nimport { render } from '@ember/test-helpers';\n\nconsole.log('Suite B is evaluated');\n\nmodule('Suite B', function (hooks) {\n  setupRenderingTest(hooks);\n\n  test('a', async function (assert) {\n    await new Promise((resolve) => setTimeout(resolve, 2_000));\n    await render(<template>a</template>);\n\n    assert.dom().hasText('a');\n  });\n\n  test('b', async function (assert) {\n    await render(<template>b</template>);\n\n    assert.dom().hasText('b');\n  });\n\n  test('c', async function (assert) {\n    await render(<template>c</template>);\n\n    assert.dom().hasText('c');\n  });\n});\n"
  },
  {
    "path": "test-apps/vite-with-compat/tests/test-helper.js",
    "content": "import Application from 'vite-with-compat/app';\nimport config from 'vite-with-compat/config/environment';\nimport * as QUnit from 'qunit';\nimport { setApplication } from '@ember/test-helpers';\nimport { setup } from 'qunit-dom';\nimport { start as startEmberExam } from 'ember-exam/test-support';\n\nexport async function start({ availableModules }) {\n  setApplication(Application.create(config.APP));\n\n  setup(QUnit.assert);\n\n  await startEmberExam({ availableModules });\n}\n"
  },
  {
    "path": "test-apps/vite-with-compat/vite.config.mjs",
    "content": "import { defineConfig } from 'vite';\nimport { extensions, classicEmberSupport, ember } from '@embroider/vite';\nimport { babel } from '@rollup/plugin-babel';\n\nexport default defineConfig({\n  plugins: [\n    classicEmberSupport(),\n    ember(),\n    // extra plugins here\n    babel({\n      babelHelpers: 'runtime',\n      extensions,\n    }),\n  ],\n  build: {\n    rollupOptions: {\n      output: {\n        /**\n         * This is super try-hard mode, but since we're debugging\n         * with built assets, we want to have smaller chunks for easier debugging without sourcemaps.\n         */\n        // manualChunks(id) {\n        //   let maybePkg = guessPkgName(id);\n        //\n        //   return maybePkg;\n        // },\n      },\n    },\n  },\n});\n\n// eslint-disable-next-line no-unused-vars\nfunction guessPkgName(id) {\n  if (!id.includes('/')) {\n    return id;\n  }\n\n  let parts = id.split('/node_modules/');\n\n  let significant = parts.at(-1);\n\n  {\n    let parts = significant.split('.pnpm/');\n\n    significant = parts.at(-1);\n  }\n\n  {\n    let parts = significant.split('/');\n\n    if (parts[0] !== id) return parts[0].replace('@', '');\n  }\n\n  if (id.includes('/-embroider')) {\n    return 'embroider';\n  }\n\n  return guessPkgName(id);\n}\n"
  },
  {
    "path": "testem.js",
    "content": "module.exports = {\n  test_page: 'tests/index.html?hidepassed',\n  disable_watching: true,\n  launch_in_ci: ['Chrome'],\n  launch_in_dev: ['Chrome'],\n  timeout: 25,\n  browser_args: {\n    Chrome: {\n      ci: [\n        // --no-sandbox is needed when running Chrome inside a container\n        process.env.CI ? '--no-sandbox' : null,\n        '--headless',\n        '--disable-dev-shm-usage',\n        '--disable-software-rasterizer',\n        '--mute-audio',\n        '--remote-debugging-port=0',\n        '--window-size=1440,900',\n      ].filter(Boolean),\n    },\n  },\n  parallel: -1,\n};\n"
  },
  {
    "path": "testem.multiple-test-page.js",
    "content": "module.exports = {\n  framework: 'qunit',\n  test_page: [\n    'tests/index.html?hidepassed&derp=herp',\n    'tests/index.html?hidepassed&foo=bar',\n  ],\n  disable_watching: true,\n  launch_in_ci: ['Chrome'],\n  launch_in_dev: ['Chrome'],\n  browser_args: {\n    Chrome: [\n      '--disable-gpu',\n      '--headless',\n      '--remote-debugging-port=9222',\n      '--window-size=1440,900',\n    ],\n  },\n  parallel: -1,\n};\n"
  },
  {
    "path": "testem.no-test-page.js",
    "content": "module.exports = {\n  framework: 'qunit',\n  disable_watching: true,\n  launch_in_ci: ['Chrome'],\n  launch_in_dev: ['Chrome'],\n  browser_args: {\n    Chrome: [\n      '--disable-gpu',\n      '--headless',\n      '--remote-debugging-port=9222',\n      '--window-size=1440,900',\n    ],\n  },\n  parallel: -1,\n};\n"
  },
  {
    "path": "testem.simple-test-page.js",
    "content": "module.exports = {\n  foo: 'bar',\n};\n"
  },
  {
    "path": "tests/dummy/app/app.js",
    "content": "import Application from '@ember/application';\nimport Resolver from 'ember-resolver';\nimport loadInitializers from 'ember-load-initializers';\nimport config from './config/environment';\n\nclass App extends Application {\n  modulePrefix = config.modulePrefix;\n  podModulePrefix = config.podModulePrefix;\n  Resolver = Resolver;\n}\n\nloadInitializers(App, config.modulePrefix);\n\nexport default App;\n"
  },
  {
    "path": "tests/dummy/app/index.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <title>Ember Exam</title>\n    <meta name=\"description\" content=\"\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n\n    {{content-for \"head\"}}\n\n    <link integrity=\"\" rel=\"stylesheet\" href=\"{{rootURL}}assets/vendor.css\">\n    <link integrity=\"\" rel=\"stylesheet\" href=\"{{rootURL}}assets/dummy.css\">\n\n    {{content-for \"head-footer\"}}\n  </head>\n  <body>\n    {{content-for \"body\"}}\n\n    <script src=\"{{rootURL}}assets/vendor.js\"></script>\n    <script src=\"{{rootURL}}assets/dummy.js\"></script>\n\n    {{content-for \"body-footer\"}}\n  </body>\n</html>\n"
  },
  {
    "path": "tests/dummy/app/router.js",
    "content": "import AddonDocsRouter, { docsRoute } from 'ember-cli-addon-docs/router';\nimport config from './config/environment';\n\nconst Router = AddonDocsRouter.extend({\n  location: config.locationType,\n  rootURL: config.rootURL,\n});\n\nRouter.map(function () {\n  docsRoute(this, function () {\n    this.route('randomization');\n    this.route('randomization-iterator');\n    this.route('module-metadata');\n    this.route('splitting');\n    this.route('split-parallel');\n    this.route('filtering');\n    this.route('load-balancing');\n    this.route('preserve-test-name');\n\n    this.route('ember-try-and-ci');\n    this.route('test-suite-segmentation');\n  });\n\n  this.route('not-found', { path: '/*path' });\n});\n\nexport default Router;\n"
  },
  {
    "path": "tests/dummy/app/styles/app.css",
    "content": ":root {\n  --brand-primary: #751c27;\n}\n\n.home {\n  padding-left: 1rem;\n  padding-right: 1rem;\n  max-width: 900px;\n  margin: 2rem auto 4rem;\n}\n\n.home__section {\n  margin-bottom: 2.5rem;\n}\n\n.home__lead {\n  margin-top: 0.5rem;\n  font-size: 18px;\n  line-height: 1.5;\n}\n\n.max-width {\n  max-width: 100%;\n}\n"
  },
  {
    "path": "tests/dummy/config/ember-cli-update.json",
    "content": "{\n  \"schemaVersion\": \"1.0.0\",\n  \"packages\": [\n    {\n      \"name\": \"ember-cli\",\n      \"version\": \"5.5.0\",\n      \"blueprints\": [\n        {\n          \"name\": \"addon\",\n          \"outputRepo\": \"https://github.com/ember-cli/ember-addon-output\",\n          \"codemodsSource\": \"ember-addon-codemods-manifest@1\",\n          \"isBaseBlueprint\": true,\n          \"options\": [\n            \"--yarn\",\n            \"--no-welcome\"\n          ]\n        }\n      ]\n    }\n  ]\n}\n"
  },
  {
    "path": "tests/dummy/config/ember-try.js",
    "content": "'use strict';\n\nconst getChannelURL = require('ember-source-channel-url');\nconst { embroiderSafe, embroiderOptimized } = require('@embroider/test-setup');\n\nconst command = [\n  'ember',\n  'exam',\n  '--split',\n  '3',\n  '--parallel',\n  '1',\n  '--random',\n  process.env.TRAVIS_PULL_REQUEST,\n]\n  .filter(Boolean)\n  .join(' ');\n\nmodule.exports = async function () {\n  return {\n    command,\n    usePnpm: true,\n    scenarios: [\n      {\n        name: 'ember-lts-4.8',\n        npm: {\n          devDependencies: {\n            'ember-source': '~4.8.0',\n          },\n        },\n      },\n      {\n        name: 'ember-lts-4.12',\n        npm: {\n          devDependencies: {\n            'ember-source': '~4.12.0',\n          },\n        },\n      },\n      {\n        name: 'ember-release',\n        npm: {\n          devDependencies: {\n            'ember-source': await getChannelURL('release'),\n          },\n        },\n      },\n      {\n        name: 'ember-beta',\n        npm: {\n          devDependencies: {\n            'ember-source': await getChannelURL('beta'),\n          },\n        },\n      },\n      {\n        name: 'ember-canary',\n        npm: {\n          devDependencies: {\n            'ember-source': await getChannelURL('canary'),\n          },\n        },\n      },\n      embroiderSafe(),\n      embroiderOptimized(),\n    ],\n  };\n};\n"
  },
  {
    "path": "tests/dummy/config/environment.js",
    "content": "'use strict';\n\nmodule.exports = function (environment) {\n  const ENV = {\n    modulePrefix: 'dummy',\n    environment,\n    rootURL: '/',\n    locationType: 'history',\n    EmberENV: {\n      EXTEND_PROTOTYPES: false,\n      FEATURES: {\n        // Here you can enable experimental features on an ember canary build\n        // e.g. 'with-controller': true\n      },\n    },\n\n    APP: {\n      // Here you can pass flags/options to your application instance\n      // when it is created\n    },\n  };\n\n  if (environment === 'development') {\n    // ENV.APP.LOG_RESOLVER = true;\n    // ENV.APP.LOG_ACTIVE_GENERATION = true;\n    // ENV.APP.LOG_TRANSITIONS = true;\n    // ENV.APP.LOG_TRANSITIONS_INTERNAL = true;\n    // ENV.APP.LOG_VIEW_LOOKUPS = true;\n  }\n\n  if (environment === 'test') {\n    // Testem prefers this...\n    ENV.locationType = 'none';\n\n    // keep test console output quieter\n    ENV.APP.LOG_ACTIVE_GENERATION = false;\n    ENV.APP.LOG_VIEW_LOOKUPS = false;\n\n    ENV.APP.rootElement = '#ember-testing';\n    ENV.APP.autoboot = false;\n  }\n\n  if (environment === 'production') {\n    // Allow ember-cli-addon-docs to update the rootURL in compiled assets\n    ENV.rootURL = 'ADDON_DOCS_ROOT_URL';\n    // here you can enable a production-specific feature\n  }\n\n  return ENV;\n};\n"
  },
  {
    "path": "tests/dummy/config/optional-features.json",
    "content": "{\n  \"application-template-wrapper\": false,\n  \"jquery-integration\": false,\n  \"template-only-glimmer-components\": true\n}\n"
  },
  {
    "path": "tests/dummy/config/targets.js",
    "content": "'use strict';\n\nconst browsers = [\n  'last 1 Chrome versions',\n  'last 1 Firefox versions',\n  'last 1 Safari versions',\n];\n\nmodule.exports = {\n  browsers,\n};\n"
  },
  {
    "path": "tests/dummy/public/crossdomain.xml",
    "content": "<?xml version=\"1.0\"?>\n<!DOCTYPE cross-domain-policy SYSTEM \"http://www.adobe.com/xml/dtds/cross-domain-policy.dtd\">\n<cross-domain-policy>\n  <!-- Read this: www.adobe.com/devnet/articles/crossdomain_policy_file_spec.html -->\n\n  <!-- Most restrictive policy: -->\n  <site-control permitted-cross-domain-policies=\"none\"/>\n\n  <!-- Least restrictive policy: -->\n  <!--\n  <site-control permitted-cross-domain-policies=\"all\"/>\n  <allow-access-from domain=\"*\" to-ports=\"*\" secure=\"false\"/>\n  <allow-http-request-headers-from domain=\"*\" headers=\"*\" secure=\"false\"/>\n  -->\n</cross-domain-policy>\n"
  },
  {
    "path": "tests/dummy/public/robots.txt",
    "content": "# http://www.robotstxt.org\nUser-agent: *\nDisallow:\n"
  },
  {
    "path": "tests/index.html",
    "content": "<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <title>Ember Exam</title>\n    <meta name=\"description\" content=\"\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n\n    {{content-for \"head\"}}\n    {{content-for \"test-head\"}}\n\n    <link rel=\"stylesheet\" href=\"{{rootURL}}assets/vendor.css\">\n    <link rel=\"stylesheet\" href=\"{{rootURL}}assets/dummy.css\">\n    <link rel=\"stylesheet\" href=\"{{rootURL}}assets/test-support.css\">\n\n    {{content-for \"head-footer\"}}\n    {{content-for \"test-head-footer\"}}\n  </head>\n  <body>\n    {{content-for \"body\"}}\n    {{content-for \"test-body\"}}\n \n    <div id=\"qunit\"></div>\n    <div id=\"qunit-fixture\">\n      <div id=\"ember-testing-container\">\n        <div id=\"ember-testing\"></div>\n      </div>\n    </div>\n\n\n   <div id=\"qunit\"></div>\n    <div id=\"qunit-fixture\">\n      <div id=\"ember-testing-container\">\n        <div id=\"ember-testing\"></div>\n      </div>\n    </div>\n\n    <script src=\"/testem.js\" integrity=\"\" data-embroider-ignore></script>\n    <script src=\"{{rootURL}}assets/vendor.js\"></script>\n    <script src=\"{{rootURL}}assets/test-support.js\"></script>\n    <script src=\"{{rootURL}}assets/dummy.js\"></script>\n    <script src=\"{{rootURL}}assets/tests.js\"></script>\n\n    {{content-for \"body-footer\"}}\n    {{content-for \"test-body-footer\"}}\n  </body>\n</html>\n"
  },
  {
    "path": "tests/test-helper.js",
    "content": "import Application from 'dummy/app';\nimport config from 'dummy/config/environment';\nimport { setApplication } from '@ember/test-helpers';\nimport { start as startEmberExam } from 'ember-exam/test-support';\nimport { setupEmberOnerrorValidation } from 'ember-qunit';\n\nsetApplication(Application.create(config.APP));\nsetupEmberOnerrorValidation();\nstartEmberExam();\n"
  },
  {
    "path": "tests/unit/async-iterator-test.js",
    "content": "import AsyncIterator from 'ember-exam/test-support/-private/async-iterator';\nimport { module, test } from 'qunit';\n\nmodule('Unit | async-iterator', function (hooks) {\n  hooks.beforeEach(function () {\n    this.testem = {\n      eventHandler: new Array(),\n      emit: function (event) {\n        const argsWithoutFirst = Array.prototype.slice.call(arguments, 1);\n        if (this.eventHandler && this.eventHandler[event]) {\n          let handlers = this.eventHandler[event];\n          for (let i = 0; i < handlers.length; i++) {\n            handlers[i].apply(this, argsWithoutFirst);\n          }\n        }\n      },\n      on: function (event, callBack) {\n        if (!this.eventHandler) {\n          this.eventHandler = {};\n        }\n        if (!this.eventHandler[event]) {\n          this.eventHandler[event] = [];\n        }\n        this.eventHandler[event].push(callBack);\n      },\n      removeEventCallbacks: () => {},\n    };\n  });\n\n  test('should instantiate', function (assert) {\n    let iteratorOfPromises = new AsyncIterator(this.testem, {\n      request: 'next-module-request',\n      response: 'next-module-response',\n    });\n\n    assert.false(iteratorOfPromises.done);\n    assert.deepEqual(typeof iteratorOfPromises.next, 'function');\n    assert.deepEqual(typeof iteratorOfPromises.dispose, 'function');\n  });\n\n  test('should get the value from response.', function (assert) {\n    const done = assert.async();\n    this.testem.on('next-module-request', () => {\n      this.testem.emit('next-module-response', {\n        done: false,\n        value: 'a',\n      });\n    });\n\n    const iteratorOfPromises = new AsyncIterator(this.testem, {\n      request: 'next-module-request',\n      response: 'next-module-response',\n    });\n\n    iteratorOfPromises.next().then((result) => {\n      assert.deepEqual(result.value, 'a');\n      done();\n    });\n  });\n\n  test('should iterate promises until there is no response.', function (assert) 
{\n    const done = assert.async();\n    const testem = this.testem;\n    const responses = ['a', 'b', 'c'];\n\n    testem.on('next-module-request', () => {\n      testem.emit('next-module-response', {\n        done: responses.length === 0,\n        value: responses.shift(),\n      });\n    });\n\n    const iteratorOfPromises = new AsyncIterator(testem, {\n      request: 'next-module-request',\n      response: 'next-module-response',\n    });\n\n    let values = [];\n\n    iteratorOfPromises\n      .next()\n      .then((res) => {\n        values.push(res.value);\n        return iteratorOfPromises.next();\n      })\n      .then((res) => {\n        values.push(res.value);\n        return iteratorOfPromises.next();\n      })\n      .then((res) => {\n        values.push(res.value);\n        assert.deepEqual(values, ['a', 'b', 'c']);\n        done();\n      });\n  });\n\n  test('should return false after disposing', function (assert) {\n    const iteratorOfPromises = new AsyncIterator(this.testem, {\n      request: 'next-module-request',\n      response: 'next-module-response',\n    });\n\n    iteratorOfPromises.dispose();\n\n    assert.true(iteratorOfPromises.done);\n  });\n\n  test('should dispose after iteration.', function (assert) {\n    const done = assert.async();\n    const testem = this.testem;\n    const responses = ['a', 'b', 'c'];\n\n    testem.on('next-module-request', () => {\n      testem.emit('next-module-response', {\n        done: responses.length === 0,\n        value: responses.shift(),\n      });\n    });\n\n    const iteratorOfPromises = new AsyncIterator(testem, {\n      request: 'next-module-request',\n      response: 'next-module-response',\n    });\n\n    iteratorOfPromises\n      .next()\n      .then((res) => {\n        assert.false(res.done);\n        return iteratorOfPromises.next();\n      })\n      .then((res) => {\n        assert.false(res.done);\n        return iteratorOfPromises.next();\n      })\n      .then((res) => {\n        
assert.false(res.done);\n        return iteratorOfPromises.next();\n      })\n      .then((res) => {\n        assert.true(res.done);\n        done();\n      });\n  });\n\n  test('should resolve with iterator finishing if request is not handled within 2s', function (assert) {\n    const done = assert.async();\n    const iteratorOfPromises = new AsyncIterator(this.testem, {\n      request: 'next-module-request',\n      response: 'next-module-response',\n      timeout: 2,\n    });\n\n    return iteratorOfPromises.next().then((res) => {\n      assert.true(res.done);\n      done();\n    });\n  });\n\n  test('should resolve a timeout error if request is not handled within 2s when emberExamExitOnError is true', function (assert) {\n    const done = assert.async();\n    const iteratorOfPromises = new AsyncIterator(this.testem, {\n      request: 'next-module-request',\n      response: 'next-module-response',\n      timeout: 2,\n      emberExamExitOnError: true,\n    });\n\n    return iteratorOfPromises.next().then(\n      () => {\n        assert.ok(false, 'Promise should not resolve, expecting reject');\n        done();\n      },\n      (err) => {\n        assert.deepEqual(\n          err.message,\n          'EmberExam: Promise timed out after 2 s while waiting for response for next-module-request',\n        );\n        done();\n      },\n    );\n  });\n\n  test('should throw an error if handleResponse is invoked while not waiting for a response', function (assert) {\n    const iteratorOfPromises = new AsyncIterator(this.testem, {\n      request: 'next-module-request',\n      response: 'next-module-response',\n    });\n\n    assert.throws(\n      () => iteratorOfPromises.handleResponse({}),\n      /Was not expecting a response, but got a response/,\n    );\n  });\n});\n"
  },
  {
    "path": "tests/unit/filter-test-modules-test.js",
    "content": "import {\n  convertFilePathToModulePath,\n  filterTestModules,\n} from 'ember-exam/test-support/-private/filter-test-modules';\nimport { module, test } from 'qunit';\nimport { setupTest } from 'ember-qunit';\n\nmodule('Unit | filter-test-modules', function () {\n  module('convertFilePathToModulePath', function (hooks) {\n    setupTest(hooks);\n\n    test('should return an input string without file extension when the input contains file extension', function (assert) {\n      assert.strictEqual(\n        convertFilePathToModulePath('/tests/integration/foo.js'),\n        '/tests/integration/foo',\n      );\n    });\n\n    test(`should return an input string without file extension when the input doesn't contain file extension`, function (assert) {\n      assert.strictEqual(\n        convertFilePathToModulePath('/tests/integration/foo'),\n        '/tests/integration/foo',\n      );\n    });\n\n    test('should return an input string after `tests` when the input is a full test file path', function (assert) {\n      assert.strictEqual(\n        convertFilePathToModulePath('dummy/tests/integration/foo.js'),\n        '/tests/integration/foo',\n      );\n    });\n  });\n\n  module('modulePath', function (hooks) {\n    setupTest(hooks);\n    hooks.beforeEach(function () {\n      this.modules = ['foo-test', 'bar-test'];\n    });\n\n    hooks.afterEach(function () {\n      this.modules = [];\n    });\n\n    test('should return a list of filtered tests', function (assert) {\n      assert.deepEqual(['foo-test'], filterTestModules(this.modules, 'foo'));\n    });\n\n    test('should return an empty list when there is no match', function (assert) {\n      assert.throws(\n        () => filterTestModules(this.modules, 'no-match'),\n        /No tests matched with the filter:/,\n      );\n    });\n\n    test('should return a list of tests matched with a regular expression', function (assert) {\n      assert.deepEqual(['foo-test'], filterTestModules(this.modules, 
'/foo/'));\n    });\n\n    test('should return a list of tests matched with a regular expression that excludes foo', function (assert) {\n      assert.deepEqual(['bar-test'], filterTestModules(this.modules, '!/foo/'));\n    });\n\n    test('should return a list of tests matches with a list of string options', function (assert) {\n      assert.deepEqual(\n        ['foo-test', 'bar-test'],\n        filterTestModules(this.modules, 'foo, bar'),\n      );\n    });\n\n    test('should return a list of unique tests matches when options are repeated', function (assert) {\n      assert.deepEqual(\n        ['foo-test'],\n        filterTestModules(this.modules, 'foo, foo'),\n      );\n    });\n  });\n\n  module('filePath', function (hooks) {\n    setupTest(hooks);\n    hooks.beforeEach(function () {\n      this.modules = [\n        'dummy/tests/integration/foo-test',\n        'dummy/tests/unit/foo-test',\n        'dummy/tests/unit/bar-test',\n      ];\n    });\n\n    hooks.afterEach(function () {\n      this.modules = [];\n    });\n\n    test('should return a test module matched with full test file path', function (assert) {\n      assert.deepEqual(\n        ['dummy/tests/integration/foo-test'],\n        filterTestModules(\n          this.modules,\n          null,\n          'app/tests/integration/foo-test.js',\n        ),\n      );\n    });\n\n    test('should return a test module matched with relative test file path', function (assert) {\n      assert.deepEqual(\n        ['dummy/tests/unit/foo-test'],\n        filterTestModules(this.modules, null, '/unit/foo-test'),\n      );\n    });\n\n    test('should return a test module matched with test folder path with wildcard', function (assert) {\n      assert.deepEqual(\n        ['dummy/tests/unit/foo-test', 'dummy/tests/unit/bar-test'],\n        filterTestModules(this.modules, null, '/unit/*'),\n      );\n    });\n\n    test('should return a test module matched with test file path with wildcard', function (assert) {\n      
assert.deepEqual(\n        ['dummy/tests/integration/foo-test', 'dummy/tests/unit/foo-test'],\n        filterTestModules(this.modules, null, '/tests/*/foo*'),\n      );\n    });\n\n    test('should return an empty list when there is no match', function (assert) {\n      assert.throws(\n        () => filterTestModules(this.modules, null, 'no-match'),\n        /No tests matched with the filter:/,\n      );\n    });\n\n    test('should return a list of tests matches with a list of string options', function (assert) {\n      assert.deepEqual(\n        ['dummy/tests/integration/foo-test', 'dummy/tests/unit/foo-test'],\n        filterTestModules(\n          this.modules,\n          null,\n          '/tests/integration/*, dummy/tests/unit/foo-test',\n        ),\n      );\n    });\n\n    test('should return a list of unique tests matches when options are repeated', function (assert) {\n      assert.deepEqual(\n        ['dummy/tests/unit/bar-test', 'dummy/tests/unit/foo-test'],\n        filterTestModules(\n          this.modules,\n          null,\n          'app/tests/unit/bar-test.js, /tests/unit/*',\n        ),\n      );\n    });\n  });\n});\n"
  },
  {
    "path": "tests/unit/multiple-edge-cases-test.js",
    "content": "import { module, test } from 'qunit';\n\nmodule('#3: Module With Multiple Edge Case Tests', function () {\n  test('#1 RegExp test', function (assert) {\n    assert.ok(/derp/.test('derp'));\n  });\n});\n"
  },
  {
    "path": "tests/unit/multiple-ember-tests-test.js",
    "content": "import { module, test } from 'qunit';\nimport { setupTest } from 'ember-qunit';\n\nmodule('#1: Module-For With Multiple Tests', function (hooks) {\n  setupTest(hooks);\n\n  test('#1', function (assert) {\n    assert.ok(true);\n  });\n  test('#2', function (assert) {\n    assert.ok(true);\n  });\n  test('#3', function (assert) {\n    assert.ok(true);\n  });\n  test('#4', function (assert) {\n    assert.ok(true);\n  });\n  test('#5', function (assert) {\n    assert.ok(true);\n  });\n  test('#6', function (assert) {\n    assert.ok(true);\n  });\n  test('#7', function (assert) {\n    assert.ok(true);\n  });\n  test('#8', function (assert) {\n    assert.ok(true);\n  });\n  test('#9', function (assert) {\n    assert.ok(true);\n  });\n});\n"
  },
  {
    "path": "tests/unit/multiple-tests-test.js",
    "content": "import { module, test } from 'qunit';\n\nmodule('#2: Module With Multiple Tests', function () {\n  test('#1', function (assert) {\n    assert.ok(true);\n  });\n  test('#2', function (assert) {\n    assert.ok(true);\n  });\n  test('#3', function (assert) {\n    assert.ok(true);\n  });\n  test('#4', function (assert) {\n    assert.ok(true);\n  });\n  test('#5', function (assert) {\n    assert.ok(true);\n  });\n  test('#6', function (assert) {\n    assert.ok(true);\n  });\n  test('#7', function (assert) {\n    assert.ok(true);\n  });\n  test('#8', function (assert) {\n    assert.ok(true);\n  });\n  test('#9', function (assert) {\n    assert.ok(true);\n  });\n});\n"
  },
  {
    "path": "tests/unit/test-loader-test.js",
    "content": "import EmberExamTestLoader from 'ember-exam/test-support/-private/ember-exam-test-loader';\nimport { module, test } from 'qunit';\n\nmodule('Unit | test-loader', function (hooks) {\n  hooks.beforeEach(function () {\n    this.originalRequire = window.require;\n    this.requiredModules = [];\n    window.require = (name) => {\n      this.requiredModules.push(name);\n    };\n\n    window.requirejs.entries = {\n      'test-1-test': true,\n      'test-2-test': true,\n      'test-3-test': true,\n      'test-4-test': true,\n    };\n    this.testem = {\n      eventQueue: new Array(),\n      emit: function (event) {\n        this.eventQueue.push(event);\n      },\n      on: () => {},\n    };\n    this.qunit = {\n      config: {\n        queue: [],\n      },\n      begin: () => {},\n      moduleDone: () => {},\n      testDone: () => {},\n    };\n  });\n\n  hooks.afterEach(function () {\n    window.require = this.originalRequire;\n  });\n\n  test('loads all test modules by default', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map(),\n      this.qunit,\n    );\n    await testLoader.loadModules();\n\n    assert.deepEqual(this.requiredModules, [\n      'test-1-test',\n      'test-2-test',\n      'test-3-test',\n      'test-4-test',\n    ]);\n  });\n\n  test('loads all test modules when testem object is not available', async function (assert) {\n    const undefinedTestem = undefined;\n    const testLoader = new EmberExamTestLoader(\n      undefinedTestem,\n      new Map(),\n      this.qunit,\n    );\n    await testLoader.loadModules();\n\n    assert.deepEqual(this.requiredModules, [\n      'test-1-test',\n      'test-2-test',\n      'test-3-test',\n      'test-4-test',\n    ]);\n  });\n\n  test('loads modules from a specified partition', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('partition', 3).set('split', 4),\n      this.qunit,\n 
   );\n    await testLoader.loadModules();\n\n    assert.deepEqual(this.requiredModules, ['test-3-test']);\n  });\n\n  test('loads modules from multiple specified partitions', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('partition', [1, 3]).set('split', 4),\n      this.qunit,\n    );\n    await testLoader.loadModules();\n\n    assert.deepEqual(this.requiredModules, ['test-1-test', 'test-3-test']);\n  });\n\n  test('loads modules from the first partition by default', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('split', 4),\n      this.qunit,\n    );\n    await testLoader.loadModules();\n\n    assert.deepEqual(this.requiredModules, ['test-1-test']);\n  });\n\n  test('handles params as strings', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('partition', 3).set('split', 4),\n      this.qunit,\n    );\n    await testLoader.loadModules();\n\n    assert.deepEqual(this.requiredModules, ['test-3-test']);\n  });\n\n  test('throws an error if splitting less than one', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('split', 0),\n      this.qunit,\n    );\n\n    assert.rejects(\n      testLoader.loadModules(),\n      /You must specify a split greater than 0/,\n    );\n  });\n\n  test(\"throws an error if partition isn't a number\", async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('split', 2).set('partition', 'foo'),\n      this.qunit,\n    );\n\n    assert.rejects(\n      testLoader.loadModules(),\n      /You must specify numbers for partition \\(you specified 'foo'\\)/,\n    );\n  });\n\n  test(\"throws an error if partition isn't a number with multiple partitions\", async function (assert) {\n    const testLoader = new 
EmberExamTestLoader(\n      this.testem,\n      new Map().set('split', 2).set('partition', [1, 'foo']),\n      this.qunit,\n    );\n\n    assert.rejects(\n      testLoader.loadModules(),\n      /You must specify numbers for partition \\(you specified '1,foo'\\)/,\n    );\n  });\n\n  test('throws an error if loading partition greater than split number', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('split', 2).set('partition', 3),\n      this.qunit,\n    );\n\n    assert.rejects(\n      testLoader.loadModules(),\n      /You must specify partitions numbered less than or equal to your split value/,\n    );\n  });\n\n  test('throws an error if loading partition greater than split number with multiple partitions', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('split', 2).set('partition', [2, 3]),\n      this.qunit,\n    );\n\n    assert.rejects(\n      testLoader.loadModules(),\n      /You must specify partitions numbered less than or equal to your split value/,\n    );\n  });\n\n  test('throws an error if loading partition less than one', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('split', 2).set('partition', 0),\n      this.qunit,\n    );\n\n    assert.rejects(\n      testLoader.loadModules(),\n      /You must specify partitions numbered greater than 0/,\n    );\n  });\n\n  test('load works with a double-digit single partition', async function (assert) {\n    window.requirejs.entries = {\n      'test-1-test': true,\n      'test-2-test': true,\n      'test-3-test': true,\n      'test-4-test': true,\n      'test-5-test': true,\n      'test-6-test': true,\n      'test-7-test': true,\n      'test-8-test': true,\n      'test-9-test': true,\n      'test-10-test': true,\n    };\n\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new 
Map().set('partition', '10').set('split', 10),\n      this.qunit,\n    );\n\n    await testLoader.loadModules();\n\n    assert.deepEqual(this.requiredModules, ['test-10-test']);\n  });\n\n  test('emits the `set-modules-queue` event when load balance option is true', async function (assert) {\n    const testLoader = new EmberExamTestLoader(\n      this.testem,\n      new Map().set('loadBalance', true),\n      this.qunit,\n    );\n\n    await testLoader.loadModules();\n\n    assert.deepEqual(\n      this.testem.eventQueue,\n      ['testem:set-modules-queue'],\n      'testem:set-modules-queue event was fired',\n    );\n  });\n});\n"
  },
  {
    "path": "tests/unit/testem-output-test.js",
    "content": "import * as TestemOutput from 'ember-exam/test-support/-private/patch-testem-output';\nimport { module, test } from 'qunit';\n\nmodule('Unit | patch-testem-output', function () {\n  module('`preserveTestName` is passed', function () {\n    test('does not add partition number to test name when `split` is passed', function (assert) {\n      assert.deepEqual(\n        TestemOutput.updateTestName(\n          new Map().set('split', 2).set('preserveTestName', true),\n          'test_module | test_name',\n        ),\n        'test_module | test_name',\n      );\n    });\n\n    test('does not add partition number to test name when `split` and `partition` are passed', function (assert) {\n      assert.deepEqual(\n        TestemOutput.updateTestName(\n          new Map()\n            .set('split', 2)\n            .set('partition', 2)\n            .set('preserveTestName', true),\n          'test_module | test_name',\n        ),\n        'test_module | test_name',\n      );\n    });\n\n    test('does not add browser number to test name when `loadBalance` and `browser` are passed', function (assert) {\n      assert.deepEqual(\n        TestemOutput.updateTestName(\n          new Map()\n            .set('loadBalance', 2)\n            .set('browser', 1)\n            .set('preserveTestName', true),\n          'test_module | test_name',\n        ),\n        'test_module | test_name',\n      );\n    });\n\n    test('does not add partition number, browser number to test name when `split`, `partition`, `browser`, and `loadBalance` are  passed', function (assert) {\n      assert.deepEqual(\n        TestemOutput.updateTestName(\n          new Map()\n            .set('split', 2)\n            .set('partition', 2)\n            .set('browser', 1)\n            .set('loadBalance', 2)\n            .set('preserveTestName', true),\n          'test_module | test_name',\n        ),\n        'test_module | test_name',\n      );\n    });\n  });\n\n  module('`preserveTestName` is not 
passed', function () {\n    test('adds partition number to test name when `split` is passed', function (assert) {\n      assert.deepEqual(\n        TestemOutput.updateTestName(\n          new Map().set('split', 2),\n          'test_module | test_name',\n        ),\n        'Exam Partition 1 - test_module | test_name',\n      );\n    });\n\n    test('adds partition number to test name when `split` and `partition` are passed', function (assert) {\n      assert.deepEqual(\n        TestemOutput.updateTestName(\n          new Map().set('split', 2).set('partition', 2),\n          'test_module | test_name',\n        ),\n        'Exam Partition 2 - test_module | test_name',\n      );\n    });\n\n    test('adds browser number to test name when `loadBalance` and `browser` are passed', function (assert) {\n      assert.deepEqual(\n        TestemOutput.updateTestName(\n          new Map().set('loadBalance', 2).set('browser', 1),\n          'test_module | test_name',\n        ),\n        'Browser Id 1 - test_module | test_name',\n      );\n    });\n\n    test('adds partition number, browser number to test name when `split`, `partition`, `browser`, and `loadBalance` are  passed', function (assert) {\n      assert.deepEqual(\n        TestemOutput.updateTestName(\n          new Map()\n            .set('split', 2)\n            .set('partition', 2)\n            .set('browser', 1)\n            .set('loadBalance', 2),\n          'test_module | test_name',\n        ),\n        'Exam Partition 2 - Browser Id 1 - test_module | test_name',\n      );\n    });\n  });\n});\n"
  },
  {
    "path": "tests/unit/weight-test-modules-test.js",
    "content": "import weightTestModules from 'ember-exam/test-support/-private/weight-test-modules';\nimport { module, test } from 'qunit';\n\nmodule('Unit | weight-test-modules', function () {\n  test('should sort a list of file paths by weight', function (assert) {\n    const listOfModules = [\n      '/acceptance/test-1-test',\n      '/unit/test-1-test',\n      '/integration/test-1-test',\n      'test-1-test',\n    ];\n\n    assert.deepEqual(\n      [\n        '/acceptance/test-1-test',\n        'test-1-test',\n        '/integration/test-1-test',\n        '/unit/test-1-test',\n      ],\n      weightTestModules(listOfModules),\n    );\n  });\n\n  test('should sort a list of file paths by weight and alphabetical order', function (assert) {\n    const listOfModules = [\n      'test-b-test',\n      'test-a-test',\n      '/integration/test-b-test',\n      '/integration/test-a-test',\n      '/unit/test-b-test',\n      '/acceptance/test-b-test',\n      '/acceptance/test-a-test',\n      '/unit/test-a-test',\n    ];\n\n    assert.deepEqual(\n      [\n        '/acceptance/test-a-test',\n        '/acceptance/test-b-test',\n        'test-a-test',\n        'test-b-test',\n        '/integration/test-a-test',\n        '/integration/test-b-test',\n        '/unit/test-a-test',\n        '/unit/test-b-test',\n      ],\n      weightTestModules(listOfModules),\n    );\n  });\n});\n"
  }
]