Repository: trentmwillis/ember-exam Branch: main Commit: db0fe23257e2 Files: 200 Total size: 339.4 KB Directory structure: gitextract_2rp793zd/ ├── .codeclimate.yml ├── .editorconfig ├── .ember-cli ├── .github/ │ ├── renovate.json5 │ └── workflows/ │ ├── ci.yml │ ├── gh-pages.yml │ ├── plan-release.yml │ ├── publish.yml │ └── release.yml ├── .gitignore ├── .npmignore ├── .prettierignore ├── .prettierrc.js ├── .release-plan.json ├── .watchmanconfig ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE.md ├── README.md ├── RELEASE.md ├── addon-test-support/ │ ├── -private/ │ │ ├── async-iterator.js │ │ ├── ember-exam-test-loader.js │ │ ├── filter-test-modules.js │ │ ├── get-url-params.js │ │ ├── patch-testem-output.js │ │ ├── split-test-modules.js │ │ └── weight-test-modules.js │ ├── index.d.ts │ ├── index.js │ ├── load.js │ └── start.js ├── docs-app/ │ ├── .gitignore │ ├── .vitepress/ │ │ ├── config.mts │ │ └── theme/ │ │ ├── index.ts │ │ └── style.css │ ├── ember-try-and-ci.md │ ├── filtering.md │ ├── index.md │ ├── load-balancing.md │ ├── module-metadata.md │ ├── package.json │ ├── preserve-test-name.md │ ├── quickstart.md │ ├── randomization-iterator.md │ ├── randomization.md │ ├── split-parallel.md │ ├── splitting.md │ ├── test-suite-segmentation.md │ └── tsconfig.json ├── ember-cli-build.js ├── eslint.config.mjs ├── index.js ├── lib/ │ ├── commands/ │ │ ├── exam/ │ │ │ └── iterate.js │ │ ├── exam.js │ │ ├── index.js │ │ └── task/ │ │ ├── test-server.js │ │ └── test.js │ └── utils/ │ ├── config-reader.js │ ├── execution-state-manager.js │ ├── file-system-helper.js │ ├── query-helper.js │ ├── test-page-helper.js │ ├── testem-events.js │ └── tests-options-validator.js ├── node-tests/ │ ├── .eslintrc │ ├── acceptance/ │ │ ├── exam/ │ │ │ └── vite/ │ │ │ └── vite-test.js │ │ ├── exam-iterate-test.js │ │ ├── exam-test.js │ │ └── helpers.js │ ├── fixtures/ │ │ ├── browser-exit.js │ │ ├── failure.js │ │ ├── test-helper-with-load.js │ │ └── vite-eager-test-load.html │ ├── 
list.mjs │ └── unit/ │ ├── commands/ │ │ └── exam-test.js │ └── utils/ │ ├── config-reader-test.js │ ├── execution-state-manager-test.js │ ├── query-helper-test.js │ ├── test-page-helper-test.js │ ├── testem-events-test.js │ └── tests-options-validator-test.js ├── package.json ├── pnpm-workspace.yaml ├── test-apps/ │ ├── broccoli/ │ │ ├── .editorconfig │ │ ├── .ember-cli │ │ ├── .github/ │ │ │ └── workflows/ │ │ │ └── ci.yml │ │ ├── .gitignore │ │ ├── .prettierignore │ │ ├── .prettierrc.js │ │ ├── .stylelintignore │ │ ├── .stylelintrc.js │ │ ├── .template-lintrc.js │ │ ├── .watchmanconfig │ │ ├── README.md │ │ ├── app/ │ │ │ ├── app.js │ │ │ ├── components/ │ │ │ │ └── .gitkeep │ │ │ ├── controllers/ │ │ │ │ └── .gitkeep │ │ │ ├── helpers/ │ │ │ │ └── .gitkeep │ │ │ ├── index.html │ │ │ ├── models/ │ │ │ │ └── .gitkeep │ │ │ ├── router.js │ │ │ ├── routes/ │ │ │ │ └── .gitkeep │ │ │ ├── styles/ │ │ │ │ └── app.css │ │ │ └── templates/ │ │ │ └── application.hbs │ │ ├── config/ │ │ │ ├── ember-cli-update.json │ │ │ ├── environment.js │ │ │ ├── optional-features.json │ │ │ └── targets.js │ │ ├── ember-cli-build.js │ │ ├── eslint.config.mjs │ │ ├── package.json │ │ ├── public/ │ │ │ └── robots.txt │ │ ├── testem.js │ │ └── tests/ │ │ ├── index.html │ │ └── test-helper.js │ ├── embroider3-webpack/ │ │ ├── .editorconfig │ │ ├── .ember-cli │ │ ├── .github/ │ │ │ └── workflows/ │ │ │ └── ci.yml │ │ ├── .gitignore │ │ ├── .prettierignore │ │ ├── .prettierrc.js │ │ ├── .stylelintignore │ │ ├── .stylelintrc.js │ │ ├── .template-lintrc.js │ │ ├── .watchmanconfig │ │ ├── README.md │ │ ├── app/ │ │ │ ├── app.js │ │ │ ├── components/ │ │ │ │ └── .gitkeep │ │ │ ├── controllers/ │ │ │ │ └── .gitkeep │ │ │ ├── deprecation-workflow.js │ │ │ ├── helpers/ │ │ │ │ └── .gitkeep │ │ │ ├── index.html │ │ │ ├── models/ │ │ │ │ └── .gitkeep │ │ │ ├── router.js │ │ │ ├── routes/ │ │ │ │ └── .gitkeep │ │ │ ├── styles/ │ │ │ │ └── app.css │ │ │ └── templates/ │ │ │ └── application.hbs │ │ ├── 
config/ │ │ │ ├── ember-cli-update.json │ │ │ ├── environment.js │ │ │ ├── optional-features.json │ │ │ └── targets.js │ │ ├── ember-cli-build.js │ │ ├── eslint.config.mjs │ │ ├── package.json │ │ ├── public/ │ │ │ └── robots.txt │ │ ├── testem.js │ │ └── tests/ │ │ ├── index.html │ │ └── test-helper.js │ └── vite-with-compat/ │ ├── .editorconfig │ ├── .ember-cli │ ├── .gitignore │ ├── .prettierignore │ ├── .prettierrc.mjs │ ├── .template-lintrc.mjs │ ├── .watchmanconfig │ ├── README.md │ ├── app/ │ │ ├── app.js │ │ ├── config/ │ │ │ └── environment.js │ │ └── router.js │ ├── babel.config.cjs │ ├── config/ │ │ ├── ember-cli-update.json │ │ ├── environment.js │ │ ├── optional-features.json │ │ └── targets.js │ ├── ember-cli-build.js │ ├── eslint.config.mjs │ ├── index.html │ ├── package.json │ ├── public/ │ │ └── robots.txt │ ├── testem.cjs │ ├── tests/ │ │ ├── index.html │ │ ├── integration/ │ │ │ ├── a-test.gjs │ │ │ └── b-test.gjs │ │ └── test-helper.js │ └── vite.config.mjs ├── testem.js ├── testem.multiple-test-page.js ├── testem.no-test-page.js ├── testem.simple-test-page.js └── tests/ ├── dummy/ │ ├── app/ │ │ ├── app.js │ │ ├── index.html │ │ ├── router.js │ │ └── styles/ │ │ └── app.css │ ├── config/ │ │ ├── ember-cli-update.json │ │ ├── ember-try.js │ │ ├── environment.js │ │ ├── optional-features.json │ │ └── targets.js │ └── public/ │ ├── crossdomain.xml │ └── robots.txt ├── index.html ├── test-helper.js └── unit/ ├── async-iterator-test.js ├── filter-test-modules-test.js ├── multiple-edge-cases-test.js ├── multiple-ember-tests-test.js ├── multiple-tests-test.js ├── test-loader-test.js ├── testem-output-test.js └── weight-test-modules-test.js ================================================ FILE CONTENTS ================================================ ================================================ FILE: .codeclimate.yml ================================================ --- engines: duplication: enabled: true config: languages: javascript: 
mass_threshold: 50 eslint: enabled: true fixme: enabled: true ratings: paths: - "**.js" exclude_paths: - config/ - tests/ - vendor/ ================================================ FILE: .editorconfig ================================================ # EditorConfig helps developers define and maintain consistent # coding styles between different editors and IDEs # editorconfig.org root = true [*] end_of_line = lf charset = utf-8 trim_trailing_whitespace = true insert_final_newline = true indent_style = space indent_size = 2 [*.hbs] insert_final_newline = false [*.{diff,md}] trim_trailing_whitespace = false ================================================ FILE: .ember-cli ================================================ { /** Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript rather than JavaScript by default, when a TypeScript version of a given blueprint is available. */ "isTypeScriptProject": false } ================================================ FILE: .github/renovate.json5 ================================================ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ "config:base", ":automergeLinters", ":automergeTesters", ":dependencyDashboard", ":maintainLockFilesWeekly", ":pinOnlyDevDependencies", ":prConcurrentLimitNone", ":semanticCommitsDisabled", "github>Turbo87/renovate-config:automergeCaretConstraint", "github>Turbo87/renovate-config:commitTopics", "github>NullVoxPopuli/renovate:npm.json5" ], } ================================================ FILE: .github/workflows/ci.yml ================================================ name: CI on: push: branches: [ master, main, 'v*' ] pull_request: branches: [ master, main ] concurrency: group: ci-${{ github.head_ref || github.ref }} cancel-in-progress: true jobs: setup: name: 'Setup' runs-on: ubuntu-latest timeout-minutes: 2 outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - uses: actions/checkout@v6 - uses: 
actions/setup-node@v6 with: # This version is different, because we want newer node features # so that we can skip pnpm install for this job node-version: 24 - id: set-matrix run: | echo "matrix=$(node ./node-tests/list.mjs)" >> $GITHUB_OUTPUT lint: name: Lints runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v6 - uses: pnpm/action-setup@v4 - uses: actions/setup-node@v6 with: node-version: 18 cache: 'pnpm' - run: pnpm install - run: pnpm lint test: name: "Ember | ${{ matrix.app.name }}" runs-on: ubuntu-latest timeout-minutes: 10 strategy: fail-fast: false matrix: app: - { name: "Broccoli (v1 Addon)", dir: ".", cmd: "pnpm test:ember" } - { name: "Broccoli (v1 App)", dir: './test-apps/broccoli', cmd: 'pnpm test:ember' } - { name: "Webpack + Embroider 3 ", dir: './test-apps/embroider3-webpack', cmd: 'pnpm test:ember' } - { name: "Vite + Compat", dir: './test-apps/vite-with-compat', cmd: 'pnpm build:tests && pnpm test:exam' } steps: - uses: actions/checkout@v6 - uses: pnpm/action-setup@v4 - uses: actions/setup-node@v6 with: node-version: 18 cache: 'pnpm' - run: pnpm install - run: ${{ matrix.app.cmd }} working-directory: ${{ matrix.app.dir }} test-node: name: "Mocha | ${{ matrix.name }}" needs: ["setup"] runs-on: ubuntu-latest timeout-minutes: 30 strategy: fail-fast: false matrix: ${{fromJson(needs.setup.outputs.matrix)}} steps: - uses: actions/checkout@v6 - uses: pnpm/action-setup@v4 - uses: actions/setup-node@v6 with: node-version: 18 cache: 'pnpm' - run: pnpm install - run: ${{ matrix.command }} floating-dependencies: name: "Floating Dependencies" runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v6 - uses: pnpm/action-setup@v4 - uses: actions/setup-node@v6 with: node-version: 18 cache: 'pnpm' - run: pnpm install - run: pnpm test:ember try-scenarios: name: "Try: ${{ matrix.ember-try-scenario }}" runs-on: ubuntu-latest timeout-minutes: 10 needs: test strategy: fail-fast: false matrix: ember-try-scenario: - 
ember-lts-4.8 - ember-lts-4.12 - ember-release - ember-beta - ember-canary steps: - uses: actions/checkout@v6 - uses: pnpm/action-setup@v4 - uses: actions/setup-node@v6 with: node-version: 18 cache: 'pnpm' - run: pnpm install - run: node_modules/.bin/ember try:one ${{ matrix.ember-try-scenario }} --skip-cleanup ================================================ FILE: .github/workflows/gh-pages.yml ================================================ name: Deploy on: push: branches: [ master, main, 'v*' ] concurrency: group: gh-pages-${{ github.head_ref || github.ref }} cancel-in-progress: true jobs: # Build job build: runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v6 - uses: pnpm/action-setup@v4 - uses: actions/setup-node@v6 with: node-version: 18 cache: 'pnpm' - run: pnpm install - run: cd docs-app && pnpm docs:build - name: Upload static files as artifact id: deployment uses: actions/upload-pages-artifact@v4 with: path: docs-app/.vitepress/dist deploy: needs: build permissions: pages: write # to deploy to Pages id-token: write # to verify the deployment originates from an appropriate source # Deploy to the github-pages environment environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} # Specify runner + deployment step runs-on: ubuntu-latest steps: - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v4 # or specific "vX.X.X" version tag for this action ================================================ FILE: .github/workflows/plan-release.yml ================================================ name: Plan Release on: workflow_dispatch: push: branches: - main - master pull_request_target: # This workflow has permissions on the repo, do NOT run code from PRs in this workflow. 
See https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ types: - labeled - unlabeled concurrency: group: plan-release # only the latest one of these should ever be running cancel-in-progress: true jobs: should-run-release-plan-prepare: name: Should we run release-plan prepare? runs-on: ubuntu-latest outputs: should-prepare: ${{ steps.should-prepare.outputs.should-prepare }} steps: - uses: release-plan/actions/should-prepare-release@v1 with: ref: 'main' id: should-prepare create-prepare-release-pr: name: Create Prepare Release PR runs-on: ubuntu-latest timeout-minutes: 5 needs: should-run-release-plan-prepare permissions: contents: write issues: read pull-requests: write if: needs.should-run-release-plan-prepare.outputs.should-prepare == 'true' steps: - uses: release-plan/actions/prepare@v1 name: Run release-plan prepare with: ref: 'main' env: GITHUB_AUTH: ${{ secrets.GITHUB_TOKEN }} id: explanation - uses: peter-evans/create-pull-request@v8 name: Create Prepare Release PR with: commit-message: "Prepare Release ${{ steps.explanation.outputs.new-version}} using 'release-plan'" labels: "internal" sign-commits: true branch: release-preview title: Prepare Release ${{ steps.explanation.outputs.new-version }} body: | This PR is a preview of the release that [release-plan](https://github.com/embroider-build/release-plan) has prepared. To release you should just merge this PR 👍 ----------------------------------------- ${{ steps.explanation.outputs.text }} ================================================ FILE: .github/workflows/publish.yml ================================================ # For every push to the primary branch with .release-plan.json modified, # runs release-plan. 
name: Publish Stable on: workflow_dispatch: push: branches: - main - master paths: - '.release-plan.json' concurrency: group: publish-${{ github.head_ref || github.ref }} cancel-in-progress: true jobs: publish: name: "NPM Publish" runs-on: ubuntu-latest permissions: contents: write id-token: write attestations: write steps: - uses: actions/checkout@v6 - uses: pnpm/action-setup@v4 - uses: actions/setup-node@v6 with: node-version: 22 registry-url: 'https://registry.npmjs.org' cache: pnpm - run: npm install -g npm@latest # ensure that the globally installed npm is new enough to support OIDC - run: pnpm install --frozen-lockfile - name: Publish to NPM run: NPM_CONFIG_PROVENANCE=true pnpm release-plan publish env: GITHUB_AUTH: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .github/workflows/release.yml ================================================ name: Release on: push: tags: - 'v*' jobs: release: name: release runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: actions/setup-node@v6 with: node-version: 18 registry-url: 'https://registry.npmjs.org' - run: yarn install - run: yarn auto-dist-tag --write - run: npm publish env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} ================================================ FILE: .gitignore ================================================ # compiled output /dist/ /acceptance-dist/ dist-*/ test-execution-*.json /declarations/ # dependencies /node_modules/ # misc /connect.lock .log/ /.env* /.pnp* /.eslintcache /coverage/ /libpeerconnection.log /npm-debug.log* /testem.log /yarn-error.log # ember-try /.node_modules.ember-try/ /npm-shrinkwrap.json.ember-try /package.json.ember-try /.nyc_output/ /package-lock.json.ember-try /yarn.lock.ember-try # broccoli-debug /DEBUG/ ================================================ FILE: .npmignore ================================================ # compiled output /dist/ /tmp/ # misc /.codeclimate.yml /.editorconfig /.ember-cli /.env* 
/.eslintcache /.eslintignore /.eslintrc.js /.git/ /.github/ /.gitignore /.prettierignore /.prettierrc.js /.stylelintignore /.stylelintrc.js /.template-lintrc.js /.travis.yml /.watchmanconfig /CHANGELOG.md /CONTRIBUTING.md /config/ /ember-cli-build.js /node-tests/ /RELEASE.md /testem*.js /tests/ /yarn.lock /*.tgz .gitkeep eslint.config.mjs # ember-try /.node_modules.ember-try/ /npm-shrinkwrap.json.ember-try /package.json.ember-try /.nyc_output/ /package-lock.json.ember-try /yarn.lock.ember-try ================================================ FILE: .prettierignore ================================================ # unconventional js /blueprints/*/files/ # compiled output /dist/ docs-app/ test-apps/ acceptance-dist/ failure-dist/ # misc /coverage/ !.* .*/ # ember-try /.node_modules.ember-try/ # Ignored when enabling prettier in CI # Changes are too much, and also not super functional *.yml *.yaml *.md *.html *.json ================================================ FILE: .prettierrc.js ================================================ 'use strict'; module.exports = { overrides: [ { files: '*.{js,ts}', options: { singleQuote: true, }, }, ], }; ================================================ FILE: .release-plan.json ================================================ { "solution": { "ember-exam": { "impact": "minor", "oldVersion": "10.0.1", "newVersion": "10.1.0", "tagName": "latest", "constraints": [ { "impact": "minor", "reason": "Appears in changelog section :rocket: Enhancement" }, { "impact": "patch", "reason": "Appears in changelog section :memo: Documentation" } ], "pkgJSONPath": "./package.json" } }, "description": "## Release (2025-12-19)\n\n* ember-exam 10.1.0 (minor)\n\n#### :rocket: Enhancement\n* `ember-exam`\n * [#1489](https://github.com/ember-cli/ember-exam/pull/1489) Better vite support ([@bendemboski](https://github.com/bendemboski))\n\n#### :memo: Documentation\n* `ember-exam`\n * [#1489](https://github.com/ember-cli/ember-exam/pull/1489) Better vite 
support ([@bendemboski](https://github.com/bendemboski))\n * [#1455](https://github.com/ember-cli/ember-exam/pull/1455) Update README.md with timeout help ([@apellerano-pw](https://github.com/apellerano-pw))\n\n#### Committers: 2\n- Andrew Pellerano ([@apellerano-pw](https://github.com/apellerano-pw))\n- Ben Demboski ([@bendemboski](https://github.com/bendemboski))\n" } ================================================ FILE: .watchmanconfig ================================================ { "ignore_dirs": ["dist"] } ================================================ FILE: CHANGELOG.md ================================================ # Changelog ## Release (2025-12-19) * ember-exam 10.1.0 (minor) #### :rocket: Enhancement * `ember-exam` * [#1489](https://github.com/ember-cli/ember-exam/pull/1489) Better vite support ([@bendemboski](https://github.com/bendemboski)) #### :memo: Documentation * `ember-exam` * [#1489](https://github.com/ember-cli/ember-exam/pull/1489) Better vite support ([@bendemboski](https://github.com/bendemboski)) * [#1455](https://github.com/ember-cli/ember-exam/pull/1455) Update README.md with timeout help ([@apellerano-pw](https://github.com/apellerano-pw)) #### Committers: 2 - Andrew Pellerano ([@apellerano-pw](https://github.com/apellerano-pw)) - Ben Demboski ([@bendemboski](https://github.com/bendemboski)) ## Release (2025-12-03) * ember-exam 10.0.1 (patch) #### :bug: Bug Fix * `ember-exam` * [#1482](https://github.com/ember-cli/ember-exam/pull/1482) Read configFile From commandOptions ([@jrjohnson](https://github.com/jrjohnson)) #### Committers: 1 - Jon Johnson ([@jrjohnson](https://github.com/jrjohnson)) ## Release (2025-08-26) * ember-exam 10.0.0 (major) #### :boom: Breaking Change * `ember-exam` * [#1430](https://github.com/ember-cli/ember-exam/pull/1430) Support vite ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) #### :bug: Bug Fix * `ember-exam` * [#1450](https://github.com/ember-cli/ember-exam/pull/1450) Support cjs testem configs 
([@NullVoxPopuli](https://github.com/NullVoxPopuli)) #### :memo: Documentation * `ember-exam` * [#1347](https://github.com/ember-cli/ember-exam/pull/1347) Update setup example for new qunit requirements ([@elwayman02](https://github.com/elwayman02)) #### :house: Internal * `ember-exam` * [#1449](https://github.com/ember-cli/ember-exam/pull/1449) Spit node-tests in to parallel jobs for easier retries (we have a very short testem timeout) ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1451](https://github.com/ember-cli/ember-exam/pull/1451) Delete test duplication and use symlinks instead ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1452](https://github.com/ember-cli/ember-exam/pull/1452) Remove extraneous command in CI workflow ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1448](https://github.com/ember-cli/ember-exam/pull/1448) Split out try scenarios in to real apps for easier debugging ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1443](https://github.com/ember-cli/ember-exam/pull/1443) Remove unused deps ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1442](https://github.com/ember-cli/ember-exam/pull/1442) Get rid of custom resolver form an older era of the blueprint ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1439](https://github.com/ember-cli/ember-exam/pull/1439) Update renovate-config (move to weekly) ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1441](https://github.com/ember-cli/ember-exam/pull/1441) Set base for pages deployment ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1440](https://github.com/ember-cli/ember-exam/pull/1440) Fix static files path for gh-pages deploy ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1435](https://github.com/ember-cli/ember-exam/pull/1435) Strict dep management settings + re-roll lockfile, remove addon-docs, add vitepress ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * 
[#1434](https://github.com/ember-cli/ember-exam/pull/1434) Add prettier to lint, don't run lint with tests ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1431](https://github.com/ember-cli/ember-exam/pull/1431) Upgrade eslint / prettier ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) #### Committers: 2 - Jordan Hawker ([@elwayman02](https://github.com/elwayman02)) - [@NullVoxPopuli](https://github.com/NullVoxPopuli) ## Release (2025-03-05) ember-exam 9.1.0 (minor) #### :rocket: Enhancement * `ember-exam` * [#1313](https://github.com/ember-cli/ember-exam/pull/1313) Use ember-exam with vite ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) #### :house: Internal * `ember-exam` * [#1336](https://github.com/ember-cli/ember-exam/pull/1336) Update release-plan ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1333](https://github.com/ember-cli/ember-exam/pull/1333) Fix lints since eslint-plugin-ember was upgraded ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1332](https://github.com/ember-cli/ember-exam/pull/1332) Revert #1188 ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1330](https://github.com/ember-cli/ember-exam/pull/1330) Revert "Update dependency ember-qunit to v9" ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1329](https://github.com/ember-cli/ember-exam/pull/1329) Revert "Update pnpm to v10.5.2" ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1322](https://github.com/ember-cli/ember-exam/pull/1322) Setup Release plan ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1314](https://github.com/ember-cli/ember-exam/pull/1314) Convert to pnpm ([@NullVoxPopuli](https://github.com/NullVoxPopuli)) * [#1289](https://github.com/ember-cli/ember-exam/pull/1289) Add .codeclimate.yml to .npmignore ([@SergeAstapov](https://github.com/SergeAstapov)) #### Committers: 2 - Sergey Astapov ([@SergeAstapov](https://github.com/SergeAstapov)) - [@NullVoxPopuli](https://github.com/NullVoxPopuli) ## v9.0.0 
(2023-12-29) #### :boom: Breaking Change * [#1125](https://github.com/ember-cli/ember-exam/pull/1125) Update ember to 5.5, drop Nodes below 18, drop Mocha support ([@andreyfel](https://github.com/andreyfel)) #### :rocket: Enhancement * [#963](https://github.com/ember-cli/ember-exam/pull/963) Add preserveTestName CLI flag to remove partition and browser ([@tasha-urbancic](https://github.com/tasha-urbancic)) #### :house: Internal * [#1127](https://github.com/ember-cli/ember-exam/pull/1127) Run node tests in CI ([@andreyfel](https://github.com/andreyfel)) #### Committers: 2 - Andrey Fel ([@andreyfel](https://github.com/andreyfel)) - Natasha Urbancic ([@tasha-urbancic](https://github.com/tasha-urbancic)) ## v8.0.0 (2022-01-25) #### :boom: Breaking Change * [#769](https://github.com/ember-cli/ember-exam/pull/769) Drop support for Ember 3.19 and below ([@Turbo87](https://github.com/Turbo87)) #### :house: Internal * [#840](https://github.com/ember-cli/ember-exam/pull/840) Upgrade `@embroider/*` packages to 1.0.0 ([@SergeAstapov](https://github.com/SergeAstapov)) * [#745](https://github.com/ember-cli/ember-exam/pull/745) Upgrade eslint-plugin-ember from v8.9.1 to v10.5.8 ([@SergeAstapov](https://github.com/SergeAstapov)) * [#813](https://github.com/ember-cli/ember-exam/pull/813) Use `assert.strictEqual()` instead of `assert.equal()` ([@Turbo87](https://github.com/Turbo87)) * [#775](https://github.com/ember-cli/ember-exam/pull/775) Delete unused `herp-derp` component ([@Turbo87](https://github.com/Turbo87)) * [#774](https://github.com/ember-cli/ember-exam/pull/774) Migrate dummy app templates to use angle bracket invocation syntax ([@Turbo87](https://github.com/Turbo87)) * [#740](https://github.com/ember-cli/ember-exam/pull/740) CI: Enable Ember v4 scenarios again ([@Turbo87](https://github.com/Turbo87)) * [#768](https://github.com/ember-cli/ember-exam/pull/768) Upgrade `ember-cli-addon-docs` dependency ([@Turbo87](https://github.com/Turbo87)) * 
[#766](https://github.com/ember-cli/ember-exam/pull/766) CI: Disable failing `ember-release` scenario ([@Turbo87](https://github.com/Turbo87)) * [#748](https://github.com/ember-cli/ember-exam/pull/748) Add eslint-plugin-qunit per latest addon blueprint ([@SergeAstapov](https://github.com/SergeAstapov)) * [#744](https://github.com/ember-cli/ember-exam/pull/744) Update npmignore file ([@Turbo87](https://github.com/Turbo87)) #### Committers: 3 - Sergey Astapov ([@SergeAstapov](https://github.com/SergeAstapov)) - Stephen Yeung ([@step2yeung](https://github.com/step2yeung)) - Tobias Bieniek ([@Turbo87](https://github.com/Turbo87)) ## v7.0.1 (2021-11-02) #### :bug: Bug Fix * [#760](https://github.com/ember-cli/ember-exam/pull/760) Wait for all browsers to complete before cleaning up StateManager([@step2yeung](https://github.com/step2yeung)) * [#750](https://github.com/ember-cli/ember-exam/pull/750) Ember exam failing when browser ID not found, return 0([@step2yeung](https://github.com/step2yeung)) #### :house: Internal * [#748](https://github.com/ember-cli/ember-exam/pull/748) Add eslint-plugin-qunit per latest addon blueprint internal ([@SergeAstapov](https://github.com/SergeAstapov)) * [#744](https://github.com/ember-cli/ember-exam/pull/744) Update npmignore file internal([@Turbo87](https://github.com/Turbo87)) #### Committers: 4 - Sergey Astapov ([@SergeAstapov](https://github.com/SergeAstapov)) - Tobias Bieniek ([@Turbo87](https://github.com/Turbo87)) - Stephen Yeung ([@step2yeung](https://github.com/step2yeung)) ## v7.0.0 (2021-10-22) #### :boom: Breaking Change * [#739](https://github.com/ember-cli/ember-exam/pull/739) Update `ember-auto-import` to v2.x ([@Turbo87](https://github.com/Turbo87)) * [#690](https://github.com/ember-cli/ember-exam/pull/690) Drop support for Node 10 and upgrade deps ([@nlfurniss](https://github.com/nlfurniss)) #### :bug: Bug Fix * [#688](https://github.com/ember-cli/ember-exam/pull/688) Fix embroider tests 
([@nlfurniss](https://github.com/nlfurniss)) #### :memo: Documentation * [#687](https://github.com/ember-cli/ember-exam/pull/687) Update README.md: Fix typo in flag name ([@bantic](https://github.com/bantic)) * [#644](https://github.com/ember-cli/ember-exam/pull/644) Docs: Fix information on Load Balancing ([@brkn](https://github.com/brkn)) #### :house: Internal * [#743](https://github.com/ember-cli/ember-exam/pull/743) CI: Add `release` workflow ([@Turbo87](https://github.com/Turbo87)) * [#737](https://github.com/ember-cli/ember-exam/pull/737) Use `prettier` to format JS files ([@Turbo87](https://github.com/Turbo87)) * [#736](https://github.com/ember-cli/ember-exam/pull/736) CI: Disable Ember.js v4 scenarios ([@Turbo87](https://github.com/Turbo87)) * [#689](https://github.com/ember-cli/ember-exam/pull/689) Set ember edition to Octane to quiet build logging ([@nlfurniss](https://github.com/nlfurniss)) #### Committers: 4 - Berkan Ünal ([@brkn](https://github.com/brkn)) - Cory Forsyth ([@bantic](https://github.com/bantic)) - Nathaniel Furniss ([@nlfurniss](https://github.com/nlfurniss)) - Tobias Bieniek ([@Turbo87](https://github.com/Turbo87)) ## v6.1.0 (2021-02-17) #### :rocket: Enhancement * [#652](https://github.com/ember-cli/ember-exam/pull/652) Update to support `ember-qunit@5` ([@thoov](https://github.com/thoov)) #### Committers: 1 - Travis Hoover ([@thoov](https://github.com/thoov)) ## v6.0.1 (2020-10-28) #### :bug: Bug Fix * [#617](https://github.com/ember-cli/ember-exam/pull/617) Update @embroider/macros to fix ember-qunit@5.0.0-beta support. ([@rwjblue](https://github.com/rwjblue)) #### :house: Internal * [#618](https://github.com/ember-cli/ember-exam/pull/618) Swap to GitHub actions for CI. 
([@rwjblue](https://github.com/rwjblue)) #### Committers: 2 - Robert Jackson ([@rwjblue](https://github.com/rwjblue)) - [@dependabot-preview[bot]](https://github.com/apps/dependabot-preview) ## v6.0.0 (2020-10-12) #### :boom: Breaking Change * [#615](https://github.com/ember-cli/ember-exam/pull/615) Drop Node 13 support. ([@rwjblue](https://github.com/rwjblue)) * [#600](https://github.com/ember-cli/ember-exam/pull/600) Drop Node 11 support. ([@thoov](https://github.com/thoov)) #### :rocket: Enhancement * [#599](https://github.com/ember-cli/ember-exam/pull/599) Embroider support when `staticAddonTestSupportTrees` enabled ([@thoov](https://github.com/thoov)) #### :bug: Bug Fix * [#410](https://github.com/ember-cli/ember-exam/pull/410) Fail if parallel is not a numeric value ([@step2yeung](https://github.com/step2yeung)) #### :memo: Documentation * [#612](https://github.com/ember-cli/ember-exam/pull/612) Update README.md ([@jrowlingson](https://github.com/jrowlingson)) * [#588](https://github.com/ember-cli/ember-exam/pull/588) Add note about `--random` and `--load-balance` ([@kellyselden](https://github.com/kellyselden)) #### :house: Internal * [#614](https://github.com/ember-cli/ember-exam/pull/614) Update release automation setup. 
([@rwjblue](https://github.com/rwjblue)) * [#604](https://github.com/ember-cli/ember-exam/pull/604) Fixing bad yarn lock merge ([@thoov](https://github.com/thoov)) * [#600](https://github.com/ember-cli/ember-exam/pull/600) Fix test suite to run mocha variants during CI ([@thoov](https://github.com/thoov)) #### Committers: 6 - Jack Rowlingson ([@jrowlingson](https://github.com/jrowlingson)) - Kelly Selden ([@kellyselden](https://github.com/kellyselden)) - Robert Jackson ([@rwjblue](https://github.com/rwjblue)) - Stephen Yeung ([@step2yeung](https://github.com/step2yeung)) - Travis Hoover ([@thoov](https://github.com/thoov)) - [@dependabot-preview[bot]](https://github.com/apps/dependabot-preview) v5.0.1 / 2020-04-21 =================== * Bump fs-extra from 8.1.0 to 9.0.0 * Bump sinon from 7.5.0 to 9.0.2 * Bump cli-table3 from 0.5.1 to 0.6.0 * Bump ember-resolver from 6.0.2 to 8.0.0 * Bump ember-source from 3.17.2 to 3.18.0 * Bump semver from 7.1.3 to 7.3.2 * Bump eslint-plugin-node from 11.0.0 to 11.1.0 * Bump semver from 7.1.3 to 7.3.2 * Bump ember-template-lint from 2.4.1 to 2.5.2 * Bump mocha from 7.1.0 to 7.1.1 * Bump testdouble from 3.13.0 to 3.13.1 * Bump nyc from 15.0.0 to 15.0.1 * Bump ember-cli-htmlbars from 4.2.2 to 4.3.0 * Bump ember-cli from 3.16.0 to 3.17.0 * Bump ember-cli-babel from 7.18.0 to 7.19.0 * Bump ember-source from 3.17.1 to 3.17.2 * Bump ember-template-lint from 2.4.0 to 2.4.1 * Bump ember-source from 3.17.0 to 3.17.1 * Bump eslint-plugin-ember from 7.10.1 to 7.11.1 * Bump eslint-plugin-ember from 7.9.0 to 7.10.1 * Bump ember-template-lint from 2.3.0 to 2.4.0 v5.0.0 / 2020-03-06 =================== * [Enhancement] Update docs for ember-cli-addon-docs (@choheekim) * [Enhancement] Update node engine to be above 10 (@choheekim) * [Enhancement] Enables to execute completeBrowserHandler() when there is browser(s) failed to attach to server (@choheekim) * [Enhancement] _getTestFramework checks for ember-mocha package (@choheekim) * [Enhancement] 
updating header comments to fix warnings during "ember build" (@dcombslinkedin) * [BugFix] fix invalid ES module usage (@ef3) * Bump ember-source from 3.16.3 to 3.17.0 * Bump ember-template-lint from 1.14.0 to 2.3.0 * Bump eslint-plugin-ember from 7.8.1 to 7.9.0 * Bump testdouble from 3.12.5 to 3.13.0 * Bump mocha from 7.0.1 to 7.1.0 * Bump ember-template-lint from 1.13.2 to 1.14.0 * Bump ember-source from 3.14.3 to 3.16.3 * Bump semver from 7.1.2 to 7.1.3 * Bump ember-cli from 3.15.2 to 3.16.0 * Bump ember-cli-babel from 7.17.1 to 7.18.0 * Bump eslint-plugin-ember from 7.7.2 to 7.8.1 * Bump rimraf from 3.0.1 to 3.0.2 * Bump ember-cli-babel from 7.14.1 to 7.17.1 * Bump semver from 6.3.0 to 7.1.2 * Bump mocha from 6.2.2 to 7.0.1 * Bump ember-cli-babel from 7.13.2 to 7.14.1 * Bump rimraf from 3.0.0 to 3.0.1 * Bump ember-cli from 3.15.1 to 3.15.2 * Bump ember-cli-addon-docs-yuidoc from 0.2.3 to 0.2.4 * Bump ember-template-lint from 1.13.0 to 1.13.2 * Bump ember-cli-htmlbars from 4.2.1 to 4.2.2 * Bump ember-cli-htmlbars from 4.2.0 to 4.2.1 * Bump eslint-plugin-node from 10.0.0 to 11.0.0 * Bump nyc from 14.1.1 to 15.0.0 * Bump ember-resolver from 6.0.0 to 6.0.1 * Bump ember-template-lint from 1.11.1 to 1.12.1 * Bump eslint-plugin-ember from 7.7.1 to 7.7.2 * Bump ember-cli-babel from 7.13.0 to 7.13.2 * Bump ember-cli-htmlbars from 4.1.0 to 4.2.0 * Bump ember-try from 1.3.0 to 1.4.0 * Bump ember-cli-htmlbars from 4.0.9 to 4.1.0 * Bump ember-template-lint from 1.9.0 to 1.10.0 v4.0.9 / 2019-12-05 =================== * [Enhancement] Add a number of total tests, failed tests, passed tests, and skipped tests to a module metadata file (@choheekim) * [Enhancement] Update README.md corresponding to changes in the module metadata file contents (@choheekim) * [BugFix] Update yarn.lock to use latest version of core-js-compat (v.3.4.7) (@choheekim) * [BugFix] Fix process validation when registering callbacks for process.error & process.exit (@choheekim) * Bump ember-template-lint 
from 1.6.0 to 1.6.1 * Bump ember-qunit from 4.5.1 to 4.6.0 * Bump eslint-plugin-ember from 7.2.0 to 7.3.0 * Bump ember-load-initializers from 2.1.0 to 2.1.1 * Bump ember-template-lint from 1.6.1 to 1.8.1 * Bump ember-source from 3.13.3 to 3.14.1 * Bump chalk from 2.4.2 to 3.0.0 * Bump ember-export-application-global from 2.0.0 to 2.0.1 * Bump ember-template-lint from 1.8.1 to 1.8.2 * Bump ember-cli from 3.13.1 to 3.14.0 * Bump ember-resolver from 5.3.0 to 6.0.0 * Bump ember-cli-babel from 7.12.0 to 7.13.0 * Bump execa from 3.3.0 to 3.4.0 * Bump ember-source from 3.14.2 to 3.14.3 * Bump ember-template-lint from 1.8.2 to 1.9.0 * Bump ember-cli-htmlbars from 4.0.8 to 4.0.9 v4.0.5 / 2019-10-25 =================== * [BugFix] Validate process object is defined when registering event callbacks for process.error & process.exit (@choheekim) * [BugFix] Updates page title for dummy app to "Ember Exam" (@howie) * Bump rimraf from 2.7.1 to 3.0.0 * Bump ember-cli from 3.12.0 to 3.13.1 * Bump ember-cli-deploy-build from 1.1.1 to 2.0.0 * Bump mocha from 6.2.0 to 6.2.2 * Bump ember-template-lint from 1.5.3 to 1.6.0 * Bump ember-cli-babel from 7.11.1 to 7.12.0 * Bump ember-cli-inject-live-reload from 2.0.1 to 2.0.2 * Bump @ember/optional-features from 1.0.0 to 1.1.0 * Bump ember-cli-addon-docs from 0.6.14 to 0.6.15 * Bump ember-cli-htmlbars-inline-precompile from 2.1.0 to 3.0.1 * Bump ember-source from 3.10.2 to 3.13.3 * Bump eslint-plugin-node from 8.0.1 to 10.0.0 * Bump @ember/optional-features from 0.7.0 to 1.0.0 * Bump ember-cli-htmlbars from 3.1.0 to 4.0.8 * Bump eslint-plugin-ember from 6.10.1 to 7.2.0 v4.0.4 / 2019-09-30 =================== * [BugFix] Validate testem object is defined (@choheekim) * Bump ember-cli-babel from 7.11.0 to 7.11.1 v4.0.3 / 2019-09-24 =================== * [Feature] Introduce write-module-metadata-file (@choheekim) * Bump ember-resolver from 5.2.1 to 5.3.0 v4.0.2 / 2019-09-16 =================== * [BugFix] Ensure browserExitHandler is called for 
global errors (@step2yeung) * Bump ember-cli-deploy-git from 1.3.3 to 1.3.4 v4.0.1 / 2019-09-11 =================== * [Enhancement] Improve complete browser book keeping & improve request next module conditions (@step2yeung) * Bump sinon from 7.4.0 to 7.4.2 v4.0.0 / 2019-07-18 =================== * [Enhancement] Update to use node version >= 8 (@choheekim) * [Enhancement] Throw error when there are no matching tests with a given input by file-path and module-path (@choheekim) * [BugFix] Update yarn.lock to use v2.4.1 of ember-cli-addon-docs (@choheekim) * Bump ember-source from 3.10.1 to 3.10.2 * Bump eslint-plugin-ember from 6.6.0 to 6.7.0 * Bump semver from 6.1.1 to 6.1.2 * Bump testdouble from 3.12.0 to 3.12.2 v3.0.3 / 2019-06-18 =================== * [Feature] Introduce module-path-filter and test-file-path-filter in ember-exam (@choheekim) * Bump ember-source from 3.10.0 to 3.10.1 * Bump rsvp from 4.8.4 to 4.8.5 * Bump testdouble from 3.11.0 to 3.12.0 * Bump ember-cli-addon-docs from 0.6.11 to 0.6.13 * Bump ember-template-lint from 1.1.0 to 1.2.0 * Bump eslint-plugin-ember from 6.5.1 to 6.6.0 * Bump ember-cli-babel from 7.7.3 to 7.8.0 v3.0.2 / 2019-06-03 =================== * [Enhancement] Update documentation (Add Table of Contents) (@Vasanth-freshworks) * [Enhancement] Allow graceful exit when async iterator fails to get a module. 
Add emberExamExitOnError flag to hard fail (@step2yeung) * [BugFix] Remove duplicate nav entry (@samselikoff) * Bump ember-cli-addon-docs from 0.6.8 to 0.6.9 * Bump mocha from 6.1.2 to 6.1.3 * Bump ember-cli-addon-docs from 0.6.9 to 0.6.10 * Bump sinon from 7.3.1 to 7.3.2 * Bump eslint-plugin-ember from 6.3.0 to 6.4.1 * Bump ember-source from 3.9.0 to 3.9.1 * [Security] Bump jquery from 3.3.1 to 3.4.0 * Bump nyc from 13.3.0 to 14.1.1 * Bump ember-source from 3.9.1 to 3.10.0 * Bump fs-extra from 7.0.1 to 8.0.1 * Bump mocha from 6.1.3 to 6.1.4 * Bump ember-cli-addon-docs from 0.6.10 to 0.6.11 * Bump semver from 6.0.0 to 6.1.0 * Bump eslint-plugin-ember from 6.4.1 to 6.5.0 * Bump ember-try from 1.1.0 to 1.2.1 * Bump semver from 6.1.0 to 6.1.1 * Bump eslint-plugin-ember from 6.5.0 to 6.5.1 v3.0.1 / 2019-04-09 =================== * [Enhancement] Update documentation (@step2yeung) v3.0.0 / 2019-04-08 =================== * [Feature - Breaking] Introduce TestLoadBalancing (@choheekim) & (@step2yeung) You will need to **replace** the use of `start()` from `Ember-Qunit` or `Ember-Mocha` in `test-helper.js` with `start()` from `ember-exam`: ```js // test-helper.js import start from 'ember-exam/test-support/start'; // Options passed to `start` will be passed-through to ember-qunit or ember-mocha start(); ``` This breaking change was motivated by wanting to remove the monkey-patching, of ember-qunit and ember-mocha's test-loader, ember exam was doing. 
* [Bugfix] Ensure serialized test-execution browserId's are always treated as a string https://github.com/ember-cli/ember-exam/pull/233 * [Bugfix] fix breaking change: https://github.com/ember-cli/ember-exam/pull/242 (@step2yeung) * [Enhancement] Prettify test-execution.json (@step2yeung) * Bump ember-qunit from 4.4.0 to 4.4.1 (4 weeks ago) * Bump ember-resolver from 5.1.2 to 5.1.3 (4 weeks ago) * Bump testdouble from 3.10.0 to 3.11.0 (4 weeks ago) * Bump ember-cli-babel from 7.4.3 to 7.5.0 (4 weeks ago) * Bump ember-resolver from 5.1.1 to 5.1.2 (5 weeks ago) * Bump mocha from 6.0.0 to 6.0.1 (5 weeks ago) * Bump ember-cli-babel from 7.4.2 to 7.4.3 (5 weeks ago) * Bump ember-qunit from 4.3.0 to 4.4.0 (5 weeks ago) * Bump mocha from 5.2.0 to 6.0.0 (5 weeks ago) * Bump ember-source from 3.7.3 to 3.8.0 (5 weeks ago) * Bump sinon from 7.2.3 to 7.2.4 (5 weeks ago) * Bump nyc from 13.2.0 to 13.3.0 (6 weeks ago) * [Security] Bump handlebars from 4.0.12 to 4.1.0 (6 weeks ago) * Bump ember-cli-babel from 7.4.1 to 7.4.2 (6 weeks ago) * Bump ember-source from 3.7.2 to 3.7.3 (7 weeks ago) * Bump ember-qunit from 4.2.0 to 4.3.0 (7 weeks ago) * Bump nyc from 13.1.0 to 13.2.0 (7 weeks ago) * Bump testdouble from 3.9.3 to 3.10.0 (7 weeks ago) * Bump ember-cli-babel from 7.4.0 to 7.4.1 (8 weeks ago) * Bump eslint-plugin-ember from 6.1.0 to 6.2.0 (8 weeks ago) v2.1.5 / 2019-04-08 =================== * re-release 2.0.3 as 2.1.5, as 2.0.4...2.1.4 introduced a worth-while but unexpected breaking change. 
2.0.4...2.1.4 will be re-released as 3.x v2.1.4 / 2019-03-27 =================== * [Bugfix] Ensure serialized test-execution browserId's are always treated as a string https://github.com/ember-cli/ember-exam/pull/233 v2.1.3 / 2019-03-27 =================== * [Bugfix] fix breaking change: https://github.com/ember-cli/ember-exam/pull/242 (@step2yeung) * [Enhancement] Prettify test-execution.json (@step2yeung) v2.1.0 / 2019-03-27 =================== * [Feature] Introduce TestLoadBalancing <@choheekim> & <@step2yeung> * Bump ember-qunit from 4.4.0 to 4.4.1 (4 weeks ago) * Bump ember-resolver from 5.1.2 to 5.1.3 (4 weeks ago) * Bump testdouble from 3.10.0 to 3.11.0 (4 weeks ago) * Bump ember-cli-babel from 7.4.3 to 7.5.0 (4 weeks ago) * Bump ember-resolver from 5.1.1 to 5.1.2 (5 weeks ago) * Bump mocha from 6.0.0 to 6.0.1 (5 weeks ago) * Bump ember-cli-babel from 7.4.2 to 7.4.3 (5 weeks ago) * Bump ember-qunit from 4.3.0 to 4.4.0 (5 weeks ago) * Bump mocha from 5.2.0 to 6.0.0 (5 weeks ago) * Bump ember-source from 3.7.3 to 3.8.0 (5 weeks ago) * Bump sinon from 7.2.3 to 7.2.4 (5 weeks ago) * Bump nyc from 13.2.0 to 13.3.0 (6 weeks ago) * [Security] Bump handlebars from 4.0.12 to 4.1.0 (6 weeks ago) * Bump ember-cli-babel from 7.4.1 to 7.4.2 (6 weeks ago) * Bump ember-source from 3.7.2 to 3.7.3 (7 weeks ago) * Bump ember-qunit from 4.2.0 to 4.3.0 (7 weeks ago) * Bump nyc from 13.1.0 to 13.2.0 (7 weeks ago) * Bump testdouble from 3.9.3 to 3.10.0 (7 weeks ago) * Bump ember-cli-babel from 7.4.0 to 7.4.1 (8 weeks ago) * Bump eslint-plugin-ember from 6.1.0 to 6.2.0 (8 weeks ago) v2.0.3 / 2019-01-22 =================== * ignore .nyc_output v2.0.2 / 2019-01-22 =================== * Bump chalk from 2.4.1 to 2.4.2 * Bump debug from 4.1.0 to 4.1.1 * Bump ember-cli from 3.5.1 to 3.7.1 * Bump ember-cli-babel from 7.1.4 to 7.4.0 * Bump ember-cli-dependency-checker from 3.0.0 to 3.1.0 * Bump ember-cli-htmlbars-inline-precompile from 2.0.0 to 2.1.0 * Bump ember-qunit from 4.1.2 to 4.2.0 
* Bump ember-source from 3.6.0 to 3.7.2 * Bump ember-template-lint from 0.8.23 to 1.1.0 * Bump eslint-plugin-ember from 6.0.1 to 6.1.0 * Bump eslint-plugin-node from 8.0.0 to 8.0.1 * Bump rimraf from 2.6.2 to 2.6.3 * Bump sinon from 7.1.1 to 7.2.3 * Bump testdouble from 3.9.1 to 3.9.3 * Run test:all to trigger ember & node test in ci, add missing single quote, and change number of tests running * `setResolver()` from `@ember/test-helpers` v2.0.1 / 2018-12-07 =================== * ember-exam now sets `process.env.EMBER_EXAM_SPLIT_COUNT`, this allows testem scripts to pick up this configuration via `parallel: process.env.EMBER_EXAM_SPLIT_COUNT` v2.0.0 / 2018-12-04 =================== * Bump Node support to: ^6.14.0 || ^8.10.0 || >= 10.* * Update/Modernize all dependencies * Update/Modernize codebase * transition from ember-cli-qunit to ember-qunit v1.0.0 / 2017-11-02 ================== * Remove auto-loading functionality * Update readme to better emphasize explicit loading v0.8.1 / 2017-10-08 ================== * Warn when auto-loading (deprecation) * Remove `#` from test output. 
v0.8.0 / 2017-10-04 ================== * Removed EMBER_TRY_SCENARIO's from .travis.yml file * Fix ESLint warning * Fix mocha integration * Revert `npm install` command in .travis.yml * Upgrade all dependencies version * Upgrade Ember CLI to version 2.15 and align with default blueprint v0.7.2 / 2017-10-01 ================== * fixes #109 - use local ember v0.7.1 / 2017-09-14 ================== * Make notes about turning on parallelization more visible * Move note on >= 0.7.0 into installation section * Add installation instructions * Remove jQuery usage * Specify when to call loadEmberExam when using ember-cli-qunit@4 * fix version range * Add release process notes v0.7.0 / 2017-06-01 ================== * Document load API for version 0.7.0 * Fix eslint errors for node-land code * Refactor core functionality * Extract TestLoader mods into utility function * Simplify and revamp code coverage * Fix tests from ESLint migration * Replace JSHint with ESLint * Tweak CI configs * Change ember try:one -> ember try:each * Remove Node 0.12 from Travis * Add Node LTS versions 4.x, 6.x, and stable to Travis v0.6.2 / 2017-04-09 ================== * Downgrade split < 2 error to warning * Fix mocha test commands v0.6.1 / 2017-03-25 =================== * Ensure iterate exits with proper code * Add Ember Exam video link to Readme * Add note about using random with a seed * Fix seed logging message for random option v0.6.0 / 2016-11-27 =================== * Close code coverage gap * Update README to include Mocha info * Add framework-specific logic * Run both Mocha and QUnit tests in CI * Add tests for ember-cli-mocha * Remove moduleForAcceptance * Move QUnit-based tests to sub-directory * Remove reliance on QUnit for handling url params v0.5.3 / 2016-11-19 =================== * Fixed issue with using a single partition with a double digit v0.5.2 / 2016-11-15 =================== * Support specifying multiple partitions (#63) v0.5.1 / 2016-11-14 =================== * move rimraf to 
dependencies from devDependencies * Add note about test splitting balancing v0.5.0 / 2016-08-14 =================== * Document randomization-iterator * Add tests for randomization-iterator * Rename main acceptance test to be semantic * Introduce exam:iterate command * Tighten up npmignore * Clarify README typos * Increase mass threshold for code climate * Improve acceptance test coverage * Improve advanced configuration section of readme v0.4.6 / 2016-08-07 =================== * Don't run Travis on non-master branches * Read in testem config for constructing test page urls v0.4.5 / 2016-08-03 =================== * Fix node tests after core-object changes * Fix tests of ember-exam in 2.7 * Upgrade all deps to align with Ember 2.7.0. * Temporarily undocument `--weighted`. * Setup and document ember-try integration v0.4.4 / 2016-06-21 =================== * Remove unused dependencies * Make codeclimate and eslint configs local * Make requires lazy where possible * Remove unused Array utilities * Add CodeClimate badges to README * Setup Istanbul code coverage for node code * Fix issues found via CodeClimate * Fix Travis badge to point to master * Add additional badges to README v0.4.3 / 2016-06-05 =================== * Add Acceptance test for Testem output * Add partition number to Testem output only when applicable * Handle _split and _partition params as strings * Fix typo, partition -> _partition v0.4.2 / 2016-06-02 =================== * Introduce tests for TestLoader * Add useful errors to TestLoader * Don't fail when lint tests are disabled v0.4.1 / 2016-05-24 =================== * Fix super callbacks context v0.4.0 / 2016-05-24 =================== * Remove AST manipulations and refine API ================================================ FILE: CONTRIBUTING.md ================================================ # How To Contribute ## Installation - `git clone https://github.com/ember-cli/ember-exam.git` - `cd ember-exam` - `yarn install` ## Linting - `yarn lint:hbs` - 
`yarn lint:js` - `yarn lint:js --fix` ## Running tests - `yarn test:ember` – Runs the test suite on the current Ember version - `yarn test:ember --server` – Runs the test suite in "watch mode" - `yarn test:node` - Runs the node tests - `yarn test:all` – Runs the test suite against multiple Ember versions ## Running the dummy application - `yarn start` - Visit the dummy application at [http://localhost:4200](http://localhost:4200). For more information on using ember-cli, visit [https://cli.emberjs.com/release/](https://cli.emberjs.com/release/). ## Debugging testem Terminal 1 ```bash pnpm ember exam --load-balance --path ./dist --parallel 2 --testem-debug testem.log ``` Terminal 2 ```bash tail -f testem.log ``` ================================================ FILE: LICENSE.md ================================================ The MIT License (MIT) Copyright (c) 2015 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================ FILE: README.md ================================================ # Ember Exam ![Build Status](https://github.com/ember-cli/ember-exam/actions/workflows/ci.yml/badge.svg?event=push) [![NPM Version](https://badge.fury.io/js/ember-exam.svg)](https://badge.fury.io/js/ember-exam) [![Ember Observer Score](https://emberobserver.com/badges/ember-exam.svg)](https://emberobserver.com/addons/ember-exam) Ember Exam is an addon to allow you more control over how you run your tests when used in conjunction with [ember-qunit](https://github.com/emberjs/ember-qunit). It provides the ability to randomize, split, parallelize, and load-balance your test suite by adding a more robust CLI command. It started as a way to help reduce flaky tests and encourage healthy test driven development. It's like [Head & Shoulders](http://www.headandshoulders.com/) for your tests! [![Introduction to Ember Exam](https://cloud.githubusercontent.com/assets/2922250/22800360/157ad67c-eed7-11e6-8d33-d2c59238c7f1.png)](https://embermap.com/video/ember-exam) The [documentation website](https://ember-cli.github.io/ember-exam/) contains examples and API information. 
## Table of Contents - [Compatibility](#compatibility) - [Installation](#installation) - [How To Use](#how-to-use) * [Version < `3.0.0`](#version--300) * [Randomization](#randomization) + [Randomization Iterator](#randomization-iterator) * [Splitting](#splitting) + [Split Test Parallelization](#split-test-parallelization) * [Test Load Balancing](#test-load-balancing) - [Test Failure Reproduction](#test-failure-reproduction) * [Preserve Test Name](#preserve-test-name) - [Advanced Configuration](#advanced-configuration) * [Ember Try & CI Integration](#ember-try--ci-integration) * [Test Suite Segmentation](#test-suite-segmentation) * [Exceeding Browser Timeout](#exceeding-browser-timeout) ## Compatibility * Ember.js v4.8 or above * Ember CLI v4.8 or above * Node.js v18 or above ## Installation Installation is as easy as running: ```bash $ npm install --save-dev ember-exam ``` ## How To Use Using Ember Exam is fairly straightforward as it extends directly from the default Ember-CLI `test` command. So, by default, it will work exactly the same as `ember test`. ```bash $ ember exam $ ember exam --filter='acceptance' $ ember exam --server $ ember exam --load-balance --parallel=1 ``` For more information and examples, please visit the [documentation website](https://ember-cli.github.io/ember-exam/). ```bash # A value of filter is acceptance $ ember exam --filter 'acceptance' # A value of parallel is 2 $ ember exam --load-balance --parallel=2 --server # If a `=` is not used to pass a value to an option that requires a value, it will take anything passed after a space as its value # In this instance, the value of parallel is --server $ ember exam --load-balance --parallel --server ``` The idea is that you can replace `ember test` with `ember exam` and never look back. 
To get the unique features of Ember Exam (described in-depth below), you will need to **replace** the use of `start()` from `ember-qunit` in `test-helper.js` with `start()` from `ember-exam`: ```js // test-helper.js - import { start, setupEmberOnerrorValidation } from 'ember-qunit'; + import { setupEmberOnerrorValidation } from 'ember-qunit'; + import { start } from 'ember-exam/test-support'; // Options passed to `start` will be passed-through to ember-qunit start(); ``` ## How to use with Vite All of the above applies, but we need to tell vite to build the app before telling ember/exam to run tests on that output. Update your test-helper.js to call the ember-exam `start` function: ```diff // ... import { setApplication } from '@ember/test-helpers'; import { setup } from 'qunit-dom'; - import { start as qunitStart, setupEmberOnerrorValidation } from 'ember-qunit'; + import { setupEmberOnerrorValidation } from 'ember-qunit'; + import { start as startEmberExam } from 'ember-exam/addon-test-support'; - export function start() { + export async function start(options) { setApplication(Application.create(config.APP)); setup(QUnit.assert); setupEmberOnerrorValidation(); - qunitStart(); + // Options passed to `start` will be passed-through to ember-qunit + await startEmberExam(options); } ``` or if you have a test-helper.ts: ```diff // ... 
import { setApplication } from '@ember/test-helpers'; import { setup } from 'qunit-dom'; - import { start as qunitStart, setupEmberOnerrorValidation } from 'ember-qunit'; + import { setupEmberOnerrorValidation } from 'ember-qunit'; + import { + start as startEmberExam, + type EmberExamStartOptions, + } from 'ember-exam/addon-test-support'; - export function start() { + export async function start(options: EmberExamStartOptions) { setApplication(Application.create(config.APP)); setup(QUnit.assert); setupEmberOnerrorValidation(); - qunitStart(); + // Options passed to `start` will be passed-through to ember-qunit + await startEmberExam(options); } ``` Then, update your tests/index.html to pass availableModules to start: ```html ``` Testing development: ```bash NODE_ENV=development vite build --mode development ember exam --path dist --config-file ./testem.cjs ``` Testing production: ```bash vite build --mode test ember exam --path dist --config-file ./testem.cjs ``` > [!NOTE] > Specifying the `--path` is important because otherwise ember-cli will try to build your vite app, and it will error. > [!NOTE] > Specifying the `--config-file` is important because ember-cli (what backs ember-exam) doesn't know about cjs files. ### Version < `3.0.0` Prior to `2.1.0`, Ember Exam must be loaded by importing `addon-test-support/load.js` and calling `loadEmberExam`: ```js // test-helper.js import loadEmberExam from 'ember-exam/test-support/load'; loadEmberExam(); ``` ### Randomization ```bash $ ember exam --random[=] ``` The `random` option allows you to randomize the order in which your tests run. You can optionally specify a "seed" value from which to randomize your tests in order to reproduce results. The seed can be any string value. 
Regardless of whether you specify a seed or not, Ember Exam will log the seed value used for the randomization at the beginning of the test run: ```bash $ ember exam --random $ Randomizing tests with seed: liv5d1ixkco6qlatl6o7mbo6r $ ember exam --random=this_is1337 $ Randomizing tests with seed: this_is1337 ``` If you use `random` without specifying a seed, it must be the last argument you pass. Otherwise, Ember Exam will attempt to interpret any following arguments as the seed value. In other words: ```bash # don't do this ember exam --random --split=2 Randomizing tests with seed: --split=2 # this is not what we wanted # do this instead ember exam --split=2 --random Randomizing tests with seed: hwr74nkk55vzpvi ``` _Note: You must be using QUnit version `1.23.0` or greater for this feature to work properly. #### Randomization Iterator Randomization can be helpful for identifying non-atomic or order-dependent tests. To that end, Ember Exam provides an iterator to make it easy to test lots of variations in your test suite order quickly. ```bash $ ember exam:iterate ``` This command will build your application once, and then run the test suite with the `random` option for the specified number of iterations. You can optionally skip the build by using a previous build via the `path` option: ```bash $ ember exam:iterate --path ``` Finally, you can pass additional options through to the exam command used to run the tests via the `options` flag: ```bash $ ember exam:iterate --options ``` The `options` should be a string matching what you would use via the CLI. ### Generating Module Metadata File For Test Execution ```bash $ ember exam --write-module-metadata-file $ ember exam --wmmf ``` The `--write-module-metadata-file`, `wmmf` as an alias, allows you to generate a module metadata file after a test run. The file provides metadata about the test modules executed. 
It creates a json file, `module-metadata-.json`, which contains an array of elements representing metadata of the modules executed, sorted in ascending order: ```json [ { "moduleName": "Module-name", "total": "Total number of tests in the module", "passed": "A number of passed tests in the module", "failed": "A number of failed tests in the module", "skipped": "A number of skipped tests in the module", "duration": "ms in Total duration to execute the module", "failedTests": "A list of failed tests" } ] ``` and it looks something like below: ```json [ { "moduleName": "Slowest-module", "total": 12, "passed": 9, "failed": 1, "skipped": 2, "duration": 153, "failedTests": ["failed-test-1"] }, { "moduleName": "Fastest-module", "total": 2, "passed": 1, "failed": 0, "skipped": 0, "duration": 123, "failedTests": [] } ] ``` ### Splitting ```bash $ ember exam --split= ``` The `split` option allows you to specify the number of partitions greater than one to spread your tests across. Ember Exam will then proceed to run the first batch of tests. ```bash $ ember exam --split= --partition= ``` The `partition` option allows you to specify which test group to run after using the `split` option. It is one-indexed, so if you specify a split of 3, the last group you could run is 3 as well. You can also run multiple partitions, e.g.: ```bash $ ember exam --split=4 --partition=1 --partition=2 ``` _Note: Ember Exam splits tests by modifying the ember-qunit's `TestLoader` to bucket each test file into a partition, where each partition has an even number of test files. This makes it possible to have unbalanced partitions. To run your tests with balanced partitions, consider using `--load-balance`. For more info, see [_Test Load Balancing_](#test-load-balancing)._ #### Split Test Parallelization ```bash $ ember exam --split= --parallel ``` The `parallel` option allows you to run your split tests across multiple test pages in parallel in [Testem](https://github.com/testem/testem). 
It will use a separate browser instance for each group of tests. So, if you specify a split of 3, then 3 browser instances will be spawned with the output looking something like: ```bash ok 1 PhantomJS 1.9 - Exam Partition 1 - some test ok 2 PhantomJS 1.9 - Exam Partition 3 - some other other test ok 3 PhantomJS 1.9 - Exam Partition 2 - some other test ``` You can also combine the `parallel` option with the `partition` option to split tests, and then recombine partitions into parallel runs. This would, for example, allow you to run tests in multiple CI containers and have each CI container parallelize its list of tests. For example, if you wanted to run your tests across two containers, but have one of them run twice as many tests as the other, and run them in parallel, you could do this: ```bash # container 1 ember exam --split=3 --partition=1,2 --parallel ``` ```bash # container 2 ember exam --split=3 --partition=3 --parallel ``` **Note 1**: _Ember Exam will respect the `parallel` setting of your [Testem config file](https://github.com/testem/testem/blob/master/docs/config_file.md#config-level-options) while running tests in parallel. The default value for `parallel` in Testem is 1, which means you'll need a non-default value to actually see parallel behavior._ **Note 2**: _Ember Exam sets `process.env.EMBER_EXAM_SPLIT_COUNT` for convenience. You can use this in your Testem file._ **Note 3**: _You must be using Testem version `1.5.0` or greater for this feature to work properly._ ### Filtering Ember Exam provides options to filter test suites by two types - module path and test file path. ```bash $ ember exam --module-path= ``` The `module-path` option allows you to filter module paths by a given value. Module paths are mapped by test files and they are generated during `ember build`. After the build, `tests.js` file is created and it resides under /assets. The file is combined of all tests in an application and it has a form of `define("", others..`. 
The value for `module-path` can have either string or regular expression, for instance: ```bash # When module path value is string. This will run all modules which match with the passed value $ ember exam --module-path='dummy/tests/helpers/module-for-acceptance' # When module path value is regex. This will run all modules which have `dummy` in it $ ember exam --module-path='!/dummy/' ``` The `file-path` option is to filter tests by *test file path*. The test file path is a location of the test file in a file system. You can specify `file-path` to a location of specific test file path or you can use wildcards in paths to target multiple test files. ```bash # This will run tests that are defined in `/my-application/tests/unit/my-test.js` $ ember exam --file-path='/my-application/tests/unit/my-test.js' # This will run all test files that are under `/my-application/tests/unit/` $ ember exam --file-path='/my-application/tests/unit/*.js' ``` ### Test Load Balancing ```bash $ ember exam --parallel= --load-balance ``` The `load-balance` option allows you to load balance test files against multiple browsers. It will order the test files by test types, e.g. acceptance | integration | unit, and load balance the ordered test files between the browsers dynamically rather than statically. **Note:** parallel must be used along with load-balance to specify a number of browser(s) The `load-balance` option was added to version 1.1 to address execution performance when running against a large test suite. Web browsers and the testem server communicate via promise in order to send and receive test file. The promise timeout value is set to 15 seconds, and is configurable by adding `asyncTimeout=[timeout]` as a querystring param in the test URL or adding to the `test_page` option in the testem config. 
For example, if you specify `load-balance` and `parallel` equals 3, then three browser instances will be created and the output will look something like: ```bash # ember exam --parallel=3 --load-balance ok 1 Chrome 66.0 - Browser Id 1 - some test ok 2 Chrome 66.0 - Browser Id 2 - some another test ok 3 Chrome 66.0 - Browser Id 3 - some the other test ``` You can also specify the `split` and `partition` options with `load-balance` to load a portion of test modules on multiple CI containers. ```bash $ ember exam --split= --partition= --parallel= --load-balance ``` This command will split test files and load-balance tests from the specified partition across the browsers. For example `ember exam --split=2 --partition=1 --parallel=3 --load-balance`, the complete list of test files are split into two halves. With the first half of the list load balanced against three browsers. The output will look something like below: ```bash # ember exam --split=2 --partition=1 --parallel=3 --load-balance ok 1 Chrome 66.0 - Exam Partition 1 - browser Id 1 - some test ok 2 Chrome 66.0 - Exam Partition 1 - browser Id 2 - another test ok 3 Chrome 66.0 - Exam Partition 1 - browser Id 3 - some the other test ``` **Important information on Load Balancing** 1. The `--load-balance` option is currently only supported in CI mode and for that reason no-launch cannot be used with load-balance. 2. You must be using `ember-cli` version 3.2.0 or greater for load balancing and test failure reproduction features to work properly. 3. You must be using `ember-qunit` version 4.1.1 or greater for this feature to work properly. 4. You must be using `qunit` version 2.13.0 or greater for this feature to work properly. ##### Test Failure Reproduction Due to the dynamic nature of the load-balance option, test file execution order can vary between runs. 
In order to reproduce a past test execution, the execution must be recorded via passing --write-execution-file or --wef, which allows generating a JSON file that enables rerunning the past test execution. The option is only allowed when load-balance is passed. ```bash # The command will load in test balanced mode with of browser(s). After the test suite execution, it will generate a test-execution json file. $ ember exam --parallel= --load-balance --wef $ ember exam --parallel= --load-balance --write-execution-file ``` The file is stored in the root directory and the naming structure is `test-execution-.json`. To replay the test execution for particular browser(s), do the following: ```bash # The command will read a test execution file specified for `replay-execution` and execute a browser Id(s) from `replay-browser` $ ember exam --replay-execution=[string] --replay-browser=[num] ``` `replay-execution` allows you to specify a path to the json file to run execution against and `replay-browser` is to specify browser ID(s) to execute. ```bash # The command will read test-execution-000000.json and load the list of modules mapped to browserId 1 $ ember exam --replay-execution=test-execution-000000.json --replay-browser=1 ``` The above command will read `test-execution-000000.json` and load the list of modules which is mapped by browser ID #1. `replay-browser` can be an array of browser IDs. For instance `--replay-browser=1,2` will start two browsers and execute a list of modules which were previously run by browsers #1 and #2. ```bash # The command will read test-execution-000000.json and load the list of module mapped to browserId 1 and 2 $ ember exam --replay-execution=test-execution-000000.json --replay-browser=1,2 ``` When `replay-browser` value is not specified it will execute browserId(s) read from `failedBrowser` in the test execution file. 
```bash # The command will read test-execution-000000.json and load the list of modules mapped to browserIds from failedBrowser in the json file. $ ember exam --replay-execution=test-execution-000000.json ``` When `replay-browser` value is not specified and there is no value for `failedBrowser` in the json file it will rerun all list of modules. ```bash # The command will read test-execution-000000.json and load the list of module mapped to all browserIds when failedBrowser is none in the json file $ ember exam --replay-execution=test-execution-000000.json ``` **Important information on `--replay-execution` and `--replay-browser`** 1. You must be using `ember-cli` version 3.2.0 or greater for load-balnce and test failure reproduction features to work properly. 2. You must be using `ember-qunit` version 4.1.1 or greater for this feature to work properly. 3. You must be using `qunit` version 2.8.0 or greater for this feature to work properly. #### Preserve Test Name When using `--split` and/or `--load-balance` the output will look something like: ```bash # ember exam --split=2 --partition=1 --parallel=3 --load-balance ok 1 Chrome 66.0 - Exam Partition 1 - browser Id 1 - some test ok 2 Chrome 66.0 - Exam Partition 1 - browser Id 2 - another test ok 3 Chrome 66.0 - Exam Partition 1 - browser Id 3 - some the other test ``` However, if you change the amount of parallelization, or randomize across partitions, the output will change for the same test, which may be an issue if you are tracking test insights over time. ```bash # ember exam --split=2 --partition=1 --parallel=2 --load-balance ok 1 Chrome 66.0 - Exam Partition 1 - browser Id 2 - some test ok 2 Chrome 66.0 - Exam Partition 1 - browser Id 1 - another test ok 3 Chrome 66.0 - Exam Partition 1 - browser Id 2 - some the other test ``` You can add `--preserve-test-name` to remove the dynamic segments of the output (partition and browser) to ensure the output test names are always the same. 
```bash # ember exam --split=2 --partition=1 --parallel=3 --load-balance --preserve-test-name ok 1 Chrome 66.0 - some test ok 2 Chrome 66.0 - another test ok 3 Chrome 66.0 - some the other test ``` ## Advanced Configuration Ember Exam does its best to allow you to run your test suite in a way that is effective for your individual needs. To that end, there are lots of advanced ways to configure your setup by integrating with other aspects of the Ember testing environment. The following sections will cover a few of the more common scenarios. ### Ember Try & CI Integration Integrating ember-exam with [ember-try](https://github.com/ember-cli/ember-try) is remarkably easy. Define a [`command` in your `ember-try.js` config](https://github.com/ember-cli/ember-try#configuration-files) that leverages the `exam` command: ```js // config/ember-try.js module.exports = { command: 'ember exam --split 3 --parallel', // ... }; ``` Using [environmental variables](https://nodejs.org/api/process.html#process_process_env) gives you flexibility in how you run your tests. For instance, you could distribute your tests across processes instead of parallelizing them by specifying a `PARTITION` variable in your process environment and then consuming it like so: ```js module.exports = { command: 'ember exam --split 20 --partition ' + process.env.PARTITION, // ... }; ``` If you are working with [Travis CI](https://travis-ci.org/) then you can also easily set up seeded-random runs based on PR numbers. Similar to the following: ```js const command = [ 'ember', 'exam', '--random' ]; const pr = process.env.TRAVIS_PULL_REQUEST; if (pr) { command.push(pr); } module.exports = { command: command.join(' '), // ... }; ``` You can refer to [Travis' default environment variables](https://docs.travis-ci.com/user/environment-variables/#Default-Environment-Variables) to see what else you could possibly leverage for your test setup. 
### Test Suite Segmentation Some test suites like to segment which tests run based on various facets such as type of test, feature being tested, and so on. This can be accomplished by leveraging Testem's ability to have multiple test pages: ```json { "test_page": [ "tests/index.html?filter=acceptance", "tests/index.html?filter=!acceptance" ] } ``` You can use this feature in conjunction with Ember Exam's features, which will allow you to segment your test suite but still gain benefits from randomization and splitting. ### Exceeding Browser Timeout If you have a lot of tests you may run into a timeout error, especially in CI environments with constrained resources. ``` Error: Browser timeout exceeded: 10s ``` You can work around this by increasing `browser_disconnect_timeout` in testem.js: ```js module.exports = { browser_disconnect_timeout: 30, }; ``` ================================================ FILE: RELEASE.md ================================================ # Release Process Releases in this repo are mostly automated using [release-plan](https://github.com/embroider-build/release-plan/). Once you label all your PRs correctly (see below) you will have an automatically generated PR that updates your CHANGELOG.md file and a `.release-plan.json` that is used to prepare the release once the PR is merged. ## Preparation Since the majority of the actual release process is automated, the remaining tasks before releasing are: - correctly labeling **all** pull requests that have been merged since the last release - updating pull request titles so they make sense to our users Some great information on why this is important can be found at [keepachangelog.com](https://keepachangelog.com/en/1.1.0/), but the overall guiding principle here is that changelogs are for humans, not machines. When reviewing merged PR's the labels to be used are: - breaking - Used when the PR is considered a breaking change. - enhancement - Used when the PR adds a new feature or enhancement. 
- bug - Used when the PR fixes a bug included in a previous release. - documentation - Used when the PR adds or updates documentation. - internal - Internal changes or things that don't fit in any other category. **Note:** `release-plan` requires that **all** PRs are labeled. If a PR doesn't fit in a category it's fine to label it as `internal` ## Release Once the prep work is completed, the actual release is straight forward: you just need to merge the open [Plan Release](https://github.com/ember-cli/ember-exam/pulls?q=is%3Apr+is%3Aopen+%22Prepare+Release%22+in%3Atitle) PR ================================================ FILE: addon-test-support/-private/async-iterator.js ================================================ 'use strict'; const iteratorCompleteResponse = { done: true, value: null }; /** * A class to iterate a sequencial set of asynchronous events. * * @class AsyncIterator */ export default class AsyncIterator { constructor(testem, options) { this._testem = testem; this._request = options.request; this._response = options.response; this._done = false; this._current = null; this._boundHandleResponse = this.handleResponse.bind(this); this._waiting = false; // Set a timeout value from either url parameter or default timeout value, 15 s. this._timeout = options.timeout || 15; this._browserId = options.browserId; this._emberExamExitOnError = options.emberExamExitOnError; testem.on(this._response, this._boundHandleResponse); } /** * Indicates whether the response queue is done or not. * * @method done * @return {bool} whether the response queue is done or not */ get done() { return this._done; } /** * @method toString * @return {String} the stringified value of the iterator. 
*/ toString() { return ``; } /** * Handle a response when it's waiting for a response * * @method handleResponse * @param {*} response */ handleResponse(response) { if (this._waiting === false) { throw new Error( `${this.toString()} Was not expecting a response, but got a response`, ); } else { this._waiting = false; } try { if (response.done) { this.dispose(); } this._current.resolve(response); } catch (e) { this._current.reject(e); } finally { this._current = null; if (this.timer) { clearTimeout(this.timer); } } } /** * Dispose when an iteration is finished. * * @method dispose */ dispose() { this._done = true; this._testem.removeEventCallbacks( this._response, this._boundHandleResponse, ); } /** * Emit the current request. * * @method _makeNextRequest */ _makeNextRequest() { this._waiting = true; this._testem.emit(this._request, this._browserId); } /** * Set a timeout to reject a promise if it doesn't get response within the timeout threshold. * * @method _setTimeout * @param {*} resolve */ _setTimeout(resolve, reject) { clearTimeout(this.timeout); this.timer = setTimeout(() => { if (!this._waiting) { return; } if (this._emberExamExitOnError) { let err = new Error( `EmberExam: Promise timed out after ${this._timeout} s while waiting for response for ${this._request}`, ); reject(err); } else { console.error( `EmberExam: Promise timed out after ${this._timeout} s while waiting for response for ${this._request}. Closing browser to exit gracefully.`, ); resolve(iteratorCompleteResponse); } }, this._timeout * 1000); } /** * Gets the next response from the request and resolve the promise. * if it's end of the iteration resolve the promise with done being true. 
* * @method next * @return {Promise} */ next() { if (this._done) { return Promise.resolve(iteratorCompleteResponse); } if (this._current) { return this._current.promise; } let resolve, reject; let promise = new Promise((_resolve, _reject) => { resolve = _resolve; reject = _reject; this._setTimeout(resolve, reject); }); this._current = { resolve, reject, promise, }; this._makeNextRequest(); return promise; } } ================================================ FILE: addon-test-support/-private/ember-exam-test-loader.js ================================================ import { assert } from '@ember/debug'; import getUrlParams from './get-url-params'; import splitTestModules from './split-test-modules'; import weightTestModules from './weight-test-modules'; import { filterTestModules } from './filter-test-modules'; import { TestLoader } from 'ember-qunit/test-loader'; import AsyncIterator from './async-iterator'; import QUnit from 'qunit'; /** * EmberExamQUnitTestLoader allows delayed requiring of test modules to enable test load balancing * It extends ember-qunit/test-loader used by `ember test`, since it overrides moduleLoadFailure() * to log a test failure when a module fails to load * @class EmberExamQUnitTestLoader * @extends {TestLoader} */ export default class EmberExamTestLoader extends TestLoader { constructor(testem, urlParams, qunit = QUnit) { super(); this._testModules = []; this._testem = testem; this._qunit = qunit; this._urlParams = urlParams || getUrlParams(); } get urlParams() { return this._urlParams; } /** * ember-cli-test-loader instantiates a new TestLoader instance and calls loadModules. * EmberExamQUnitTestLoader does not support load() in favor of loadModules(). * * @method load */ static load() { throw new Error("`EmberExamQUnitTestLoader` doesn't support `load()`."); } /** * require() collects the full list of modules before requiring each module with * super.require(), instead of requiring and unseeing a module when each gets loaded. 
 *
 * @method require
 * @param {string} moduleName
 */
require(moduleName) {
  // Collect only; actual loading is deferred to loadModules()/loadIndividualModule().
  this._testModules.push(moduleName);
}

/**
 * Make unsee a no-op to avoid any unwanted resets
 *
 * @method unsee
 */
unsee() {}

/**
 * Loads the test modules depending on the urlParams
 * (split, partition, loadBalance, browser, modulePath, filePath).
 *
 * @method loadModules
 * @param {Object} [options]
 * @param {Object} [options.availableModules] map of moduleName -> loader
 *   function (or an already-loaded module value)
 */
async loadModules({ availableModules } = {}) {
  const loadBalance = this._urlParams.get('loadBalance');
  const browserId = this._urlParams.get('browser');
  const modulePath = this._urlParams.get('modulePath');
  const filePath = this._urlParams.get('filePath');
  let partitions = this._urlParams.get('partition');
  let split = parseInt(this._urlParams.get('split'), 10);

  // Default to a single split when the `split` url param is absent or not a number.
  split = isNaN(split) ? 1 : split;

  // Normalize `partition` to an array; default to the first partition.
  if (partitions === undefined) {
    partitions = [1];
  } else if (!Array.isArray(partitions)) {
    partitions = [partitions];
  }

  if (!availableModules) {
    // Let ember-qunit's TestLoader discover modules; it calls our require()
    // override above, which fills this._testModules without loading anything.
    super.loadModules();
  } else {
    assert(
      `Available modules must be an object.`,
      typeof availableModules === 'object',
    );
    this._availableModules = availableModules;
    this._testModules = Object.keys(availableModules);
  }

  this.setupModuleMetadataHandler();

  if (modulePath || filePath) {
    this._testModules = filterTestModules(
      this._testModules,
      modulePath,
      filePath,
    );
  }

  if (loadBalance && this._testem) {
    this.setupLoadBalanceHandlers();
    // Weight modules (acceptance > default > integration > unit, see
    // weight-test-modules.js) before splitting so slower modules are
    // handed out first.
    this._testModules = splitTestModules(
      weightTestModules(this._testModules),
      split,
      partitions,
    );
    // Hand the module queue to the testem server; each browser then
    // requests modules one at a time (see setupLoadBalanceHandlers).
    this._testem.emit(
      'testem:set-modules-queue',
      this._testModules,
      browserId,
    );
  } else {
    this._testModules = splitTestModules(
      this._testModules,
      split,
      partitions,
    );
    if (this._availableModules) {
      await this.loadAvailableModules();
      return;
    }

    /**
     * Legacy support
     */
    this._testModules.forEach((moduleName) => {
      super.require(moduleName);
      super.unsee(moduleName);
    });
  }
}

/**
 * Loads every module in this._testModules via its loader function.
 * availableModules are passed in from loadModules
 * from loadEmberExam
 * from start
 */
async loadAvailableModules() {
  if (this._availableModules) {
    await Promise.all(
      this._testModules.map(async (moduleName) => {
        let loader = this._availableModules[moduleName];
        /**
         * If
         * it's not a function, it's already loaded
         */
        if (typeof loader === 'function') {
          await loader();
        }
      }),
    );
  }
}

/**
 * Allow loading one module at a time.
 *
 * @method loadIndividualModule
 * @param {string} moduleName
 */
async loadIndividualModule(moduleName) {
  if (moduleName === undefined) {
    throw new Error(
      'Failed to load a test module. `moduleName` is undefined in `loadIndividualModule`.',
    );
  }

  if (this._availableModules) {
    let loader = this._availableModules[moduleName];
    /**
     * If it's not a function, it's already loaded
     */
    if (typeof loader === 'function') {
      await loader();
    }
    return;
  }

  // Legacy (non-availableModules) path: require via the parent TestLoader.
  super.require(moduleName);
  super.unsee(moduleName);
}

/**
 * setupModuleMetadataHandler() register QUnit callback to enable generating module metadata file.
 *
 * @method setupModuleMetadataHandler
 */
setupModuleMetadataHandler() {
  this._qunit.testDone((metadata) => {
    if (typeof this._testem !== 'undefined' && this._testem !== null) {
      // testem:test-done-metadata is sent to server to track test module details.
      // metadata contains name, module, failed, passed, total, duration, skipped, and todo.
      // https://api.qunitjs.com/callbacks/QUnit.testDone
      this._testem.emit('testem:test-done-metadata', metadata);
    }
  });
}

/**
 * setupLoadBalanceHandlers() registers QUnit callbacks needed for the load-balance option.
 *
 * @method setupLoadBalanceHandlers
 */
setupLoadBalanceHandlers() {
  // nextModuleAsyncIterator handles the async testem events
  // it returns an element of {value: moduleName, done: boolean}
  const nextModuleAsyncIterator = new AsyncIterator(this._testem, {
    request: 'testem:next-module-request',
    response: 'testem:next-module-response',
    timeout: this._urlParams.get('asyncTimeout'),
    browserId: this._urlParams.get('browser'),
    emberExamExitOnError: this._urlParams.get('_emberExamExitOnError'),
  });

  const nextModuleHandler = () => {
    // if there are already tests queued up, don't request next module
    // this is possible if a test file has multiple qunit modules
    if (this._qunit.config.queue.length > 0) {
      return;
    }

    return nextModuleAsyncIterator
      .next()
      .then(async (response) => {
        if (!response.done) {
          const moduleName = response.value;
          await this.loadIndividualModule(moduleName);

          // if no tests were added, request the next module
          if (this._qunit.config.queue.length === 0) {
            return nextModuleHandler();
          }
        }
      })
      .catch((e) => {
        if (
          typeof e === 'object' &&
          e !== null &&
          typeof e.message === 'string'
        ) {
          // NOTE(review): when e carries a message it is prefixed here AND
          // again by the wrapper Error below, duplicating the prefix —
          // confirm whether `throw e;` was intended inside this branch.
          e.message = `EmberExam: Failed to get next test module: ${e.message}`;
        }
        throw new Error(`EmberExam: Failed to get next test module: ${e}`);
      });
  };

  // it registers qunit begin callback to ask for a next test module to execute when the test suite begins.
  // By default ember-qunit adds `Ember.onerror` test to a qunit processing queue and once the test is complete it executes _qunit.moduleDone callback.
  // However, when `setupEmberOnerrorValidation: false` is passed the test is disabled and _qunit.begin callback needs to request a next test module to run.
this._qunit.begin(() => { return nextModuleHandler(); }); this._qunit.moduleDone(() => { return nextModuleHandler(); }); } } ================================================ FILE: addon-test-support/-private/filter-test-modules.js ================================================ // A regular expression to help parsing a string to verify regex. const MODULE_PATH_REGEXP = /^(!?)\/(.*)\/(i?)$/; const TEST_PATH_REGEX = /\/tests\/(.*?)$/; /** * Return the matched test. * e.g. if an input is '!/weight/' it returns an array, ['!/weight/', '!', 'weight', '']; * * @function getRegexFilter * @param {*} modulePath */ function getRegexFilter(modulePath) { return MODULE_PATH_REGEXP.exec(modulePath); } /** * Determine if a given module path is matched with module filter with wildcard. * e.g. A given moduleFilter, /tests/integration/*, matches with /tests/integration/foo and /tests/integration/bar * * @function wildcardFilter * @param {*} module * @param {*} moduleFilter */ function wildcardFilter(module, moduleFilter) { // Generate a regular expression to handle wildcard from path filter const moduleFilterRule = [ '^.*', moduleFilter.split('*').join('.*'), '$', ].join(''); return new RegExp(moduleFilterRule).test(module); } /** * Return a list of test modules that contain a given module path string. * * @function stringFilter * @param {Array} modules * @param {string} moduleFilter */ function stringFilter(modules, moduleFilter) { return modules.filter( (module) => module.includes(moduleFilter) || wildcardFilter(module, moduleFilter), ); } /** * Return a list of test modules that matches with a given regular expression. 
 *
 * @function regexFilter
 * @param {Array} modules list of module paths to filter
 * @param {Array} modulePathRegexFilter match result from getRegexFilter(),
 *   e.g. for '!/weight/' it is ['!/weight/', '!', 'weight', ''] — [1] is the
 *   optional '!' (exclusion), [2] the pattern body, [3] the optional 'i' flag
 */
function regexFilter(modules, modulePathRegexFilter) {
  const re = new RegExp(modulePathRegexFilter[2], modulePathRegexFilter[3]);
  const exclude = modulePathRegexFilter[1];

  // Keep matching modules, or non-matching ones when the filter is negated.
  return modules.filter(
    (module) => (!exclude && re.test(module)) || (exclude && !re.test(module)),
  );
}

/**
 * Return a module path that's mapped by a given test file path.
 * Strips the file extension, then keeps the portion starting at '/tests/'
 * when present; otherwise returns the extension-less path unchanged.
 *
 * @function convertFilePathToModulePath
 * @param {*} filePath
 */
function convertFilePathToModulePath(filePath) {
  const filePathWithNoExtension = filePath.replace(/\.[^/.]+$/, '');
  const testFilePathMatch = TEST_PATH_REGEX.exec(filePathWithNoExtension);
  // NOTE(review): the `typeof filePath !== 'undefined'` guard is dead code —
  // an undefined filePath would already have thrown on .replace() above.
  if (typeof filePath !== 'undefined' && testFilePathMatch !== null) {
    return testFilePathMatch[0];
  }
  return filePathWithNoExtension;
}

/**
 * Returns a list of test modules that match with the given module path filter or test file path.
 *
 * @function filterTestModules
 * @param {Array} modules
 * @param {string} modulePath comma-separated filters (substring, wildcard, or /regex/)
 * @param {string} filePath comma-separated test file paths; takes precedence over modulePath
 * @throws {Error} when no module matches any of the filters
 */
function filterTestModules(modules, modulePath, filePath) {
  // Generates an array with module filter values separated by a comma (,).
const moduleFilters = (filePath || modulePath) .split(',') .map((value) => value.trim()); const filteredTestModules = moduleFilters.reduce((result, moduleFilter) => { const modulePath = convertFilePathToModulePath(moduleFilter); const modulePathRegex = getRegexFilter(modulePath); if (modulePathRegex) { return result.concat( regexFilter(modules, modulePathRegex).filter( (module) => result.indexOf(module) === -1, ), ); } else { return result.concat( stringFilter(modules, modulePath).filter( (module) => result.indexOf(module) === -1, ), ); } }, []); if (filteredTestModules.length === 0) { throw new Error( `No tests matched with the filter: ${modulePath || filePath}.`, ); } return filteredTestModules; } export { convertFilePathToModulePath, filterTestModules }; ================================================ FILE: addon-test-support/-private/get-url-params.js ================================================ function decodeQueryParam(param) { return decodeURIComponent(param.replace(/\+/g, '%20')); } /** * Parses the url and return an object containing a param's key and value * * @export * @function getUrlParams * @return {Object} urlParams */ export default function getUrlParams() { const urlParams = new Map(); const params = location.search.slice(1).split('&'); for (let i = 0; i < params.length; i++) { if (params[i]) { const param = params[i].split('='); const name = decodeQueryParam(param[0]); // Allow just a key to turn on a flag, e.g., test.html?noglobals const value = param.length === 1 || decodeQueryParam(param.slice(1).join('=')); if (urlParams.has(name)) { urlParams.set(name, [].concat(urlParams.get(name), value)); } else { urlParams.set(name, value); } } } return urlParams; } ================================================ FILE: addon-test-support/-private/patch-testem-output.js ================================================ /* globals Testem */ /** * Returns a modified test name including browser or partition information * * @function updateTestName * 
@param {Map} urlParams * @param {string} testName * @return {string} testName */ export function updateTestName(urlParams, testName) { const split = urlParams.get('split'); const loadBalance = urlParams.get('loadBalance'); const partition = urlParams.get('partition') || 1; const browser = urlParams.get('browser') || 1; const preserveTestName = !!urlParams.get('preserveTestName'); if (preserveTestName) { return testName; } else if (split && loadBalance) { testName = `Exam Partition ${partition} - Browser Id ${browser} - ${testName}`; } else if (split) { testName = `Exam Partition ${partition} - ${testName}`; } else if (loadBalance) { testName = `Browser Id ${browser} - ${testName}`; } return testName; } /** * Setup testem test-result event to update the test name when a test completes * * @function patchTestemOutput * @param {Map} urlParams */ export function patchTestemOutput(urlParams) { Testem.on('test-result', (test) => { test.name = updateTestName(urlParams, test.name); }); } ================================================ FILE: addon-test-support/-private/split-test-modules.js ================================================ function createGroups(num) { const groups = new Array(num); for (let i = 0; i < num; i++) { groups[i] = []; } return groups; } function splitIntoGroups(arr, numGroups) { const groups = createGroups(numGroups); for (let i = 0; i < arr.length; i++) { groups[i % numGroups].push(arr[i]); } return groups; } /** * Splits the list of modules into unique subset of modules * return the subset indexed by the partition * * @export * @function splitTestModules * @param {Array} modules * @param {number} split * @param {number} partitions * @return {Array} tests */ export default function splitTestModules(modules, split, partitions) { if (split < 1) { throw new Error('You must specify a split greater than 0'); } const testGroups = splitIntoGroups(modules, split); const tests = []; for (let i = 0; i < partitions.length; i++) { const partition = 
parseInt(partitions[i], 10); if (isNaN(partition)) { throw new Error( "You must specify numbers for partition (you specified '" + partitions + "')", ); } if (split < partition) { throw new Error( 'You must specify partitions numbered less than or equal to your split value of ' + split, ); } else if (partition < 1) { throw new Error('You must specify partitions numbered greater than 0'); } const group = partition - 1; tests.push(...testGroups[group]); } return tests; } ================================================ FILE: addon-test-support/-private/weight-test-modules.js ================================================ const TEST_TYPE_WEIGHT = { unit: 10, integration: 20, acceptance: 150, }; const WEIGHT_REGEX = /\/(unit|integration|acceptance)\//; const DEFAULT_WEIGHT = 50; /** * Return the weight for a given module name, a file path to the module * Ember tests consist of Acceptance, Integration, and Unit tests. In general, acceptance takes * longest time to execute, followed by integration and unit. * The weight assigned to a module corresponds to its test type execution speed, with slowest being the highest in weight. 
* If the test type is not identifiable from the modulePath, weight default to 50 (ordered after acceptance, but before integration) * * @function getWeight * @param {string} modulePath File path to a module */ function getWeight(modulePath) { const [, key] = WEIGHT_REGEX.exec(modulePath) || []; if (typeof TEST_TYPE_WEIGHT[key] === 'number') { return TEST_TYPE_WEIGHT[key]; } else { return DEFAULT_WEIGHT; } } /** * Returns the list of modules sorted by its weight * * @export * @function weightTestModules * @param {Array} modules * @return {Array} */ export default function weightTestModules(modules) { const groups = new Map(); modules.forEach((module) => { const moduleWeight = getWeight(module); let moduleWeightGroup = groups.get(moduleWeight); if (Array.isArray(moduleWeightGroup)) { moduleWeightGroup.push(module); } else { moduleWeightGroup = [module]; } groups.set(moduleWeight, moduleWeightGroup); }); // return modules sorted by weight and alphabetically within its weighted groups return Array.from(groups.keys()) .sort((a, b) => b - a) .reduce((accumulatedArray, weight) => { const sortedModuleArr = groups.get(weight).sort(); return accumulatedArray.concat(sortedModuleArr); }, []); } ================================================ FILE: addon-test-support/index.d.ts ================================================ import { QUnitStartOptions } from 'ember-qunit'; export type EmberExamStartOptions = Omit & { availableModules: Record; }; export function start(options: EmberExamStartOptions): Promise; ================================================ FILE: addon-test-support/index.js ================================================ export { default as start } from './start'; ================================================ FILE: addon-test-support/load.js ================================================ import EmberExamTestLoader from './-private/ember-exam-test-loader'; import { patchTestemOutput } from './-private/patch-testem-output'; let loaded = false; /** * Setup 
EmberExamTestLoader to enable ember exam functionalities * * @function loadEmberExam * @return {*} testLoader */ export default function loadEmberExam() { if (loaded) { console.warn('Attempted to load Ember Exam more than once.'); return; } loaded = true; const testLoader = new EmberExamTestLoader(window.Testem); if (window.Testem) { patchTestemOutput(testLoader.urlParams); } return testLoader; } ================================================ FILE: addon-test-support/start.js ================================================ import loadEmberExam from './load'; import { start as qunitStart } from 'ember-qunit'; /** * Equivalent to ember-qunit's loadTest() except this does not create a new TestLoader instance * * @function loadTests * @param {*} testLoader * @param {*} loaderOptions */ async function loadTests(testLoader, loaderOptions = {}) { if (testLoader === undefined) { throw new Error( 'A testLoader instance has not been created. You must call `loadEmberExam()` before calling `loadTest()`.', ); } await testLoader.loadModules(loaderOptions); } /** * Ember-exam's own start function to set up EmberExamTestLoader, load tests and calls start() from * ember-qunit * * @function start * @param {*} qunitOptions */ export default async function start(qunitOptions = {}) { const { availableModules, ...modifiedOptions } = qunitOptions || Object.create(null); modifiedOptions.loadTests = false; const testLoader = loadEmberExam(); await loadTests(testLoader, { availableModules }); qunitStart(modifiedOptions); } ================================================ FILE: docs-app/.gitignore ================================================ dist/ node_modules/ .vitepress/dist .vitepress/cache ================================================ FILE: docs-app/.vitepress/config.mts ================================================ import { defineConfig } from 'vitepress' // https://vitepress.dev/reference/site-config export default defineConfig({ title: "ember-exam", description: "Run your 
tests with randomization, splitting, and parallelization for beautiful tests.", base: '/ember-exam/', markdown: { // theme: { // ...dark, // settings: [ // { // scope: 'comment', // settings: { // // 'foreground': 'rgb(200, 200, 200)' // } // }, // ] // }, }, themeConfig: { // https://vitepress.dev/reference/default-theme-config nav: [ { text: 'Home', link: '/' }, // { text: 'Examples', link: '/markdown-examples' } ], sidebar: [ { text: 'Options', items: [ { text: 'Randomization', link: '/randomization' }, { text: 'Randomization Iterator', link: '/randomization-iterator' }, { text: 'Generating Module Metadata For Test Execution', link: '/module-metadata' }, { text: 'Splitting', link: '/splitting' }, { text: 'Split Test Parallelization', link: '/split-parallel' }, { text: 'Filtering', link: '/filtering' }, { text: 'Test Load Balancing', link: '/load-balancing' }, ] }, { text: 'Advanced Configuration', items: [ { text: 'Ember Try & CI Integration', link: '/ember-try-and-ci' }, { text: 'Test Suite Segmentation', link: '/test-suite-segmentation' }, ] } ], socialLinks: [ { icon: 'github', link: 'https://github.com/ember-cli/ember-exam' } ] } }) ================================================ FILE: docs-app/.vitepress/theme/index.ts ================================================ // https://vitepress.dev/guide/custom-theme import { h } from 'vue' import type { Theme } from 'vitepress' import DefaultTheme from 'vitepress/theme' import './style.css' export default { extends: DefaultTheme, Layout: () => { return h(DefaultTheme.Layout, null, { // https://vitepress.dev/guide/extending-default-theme#layout-slots }) }, enhanceApp({ app, router, siteData }) { // ... 
} } satisfies Theme ================================================ FILE: docs-app/.vitepress/theme/style.css ================================================ /** * Customize default theme styling by overriding CSS variables: * https://github.com/vuejs/vitepress/blob/main/src/client/theme-default/styles/vars.css */ /** * Colors * * Each colors have exact same color scale system with 3 levels of solid * colors with different brightness, and 1 soft color. * * - `XXX-1`: The most solid color used mainly for colored text. It must * satisfy the contrast ratio against when used on top of `XXX-soft`. * * - `XXX-2`: The color used mainly for hover state of the button. * * - `XXX-3`: The color for solid background, such as bg color of the button. * It must satisfy the contrast ratio with pure white (#ffffff) text on * top of it. * * - `XXX-soft`: The color used for subtle background such as custom container * or badges. It must satisfy the contrast ratio when putting `XXX-1` colors * on top of it. * * The soft color must be semi transparent alpha channel. This is crucial * because it allows adding multiple "soft" colors on top of each other * to create a accent, such as when having inline code block inside * custom containers. * * - `default`: The color used purely for subtle indication without any * special meanings attached to it such as bg color for menu hover state. * * - `brand`: Used for primary brand colors, such as link text, button with * brand theme, etc. * * - `tip`: Used to indicate useful information. The default theme uses the * brand color for this by default. * * - `warning`: Used to indicate warning to the users. Used in custom * container, badges, etc. * * - `danger`: Used to show error, or dangerous message to the users. Used * in custom container, badges, etc. 
* -------------------------------------------------------------------------- */ :root { --vp-c-default-1: var(--vp-c-gray-1); --vp-c-default-2: var(--vp-c-gray-2); --vp-c-default-3: var(--vp-c-gray-3); --vp-c-default-soft: var(--vp-c-gray-soft); --vp-c-brand-1: var(--vp-c-indigo-1); --vp-c-brand-2: var(--vp-c-indigo-2); --vp-c-brand-3: var(--vp-c-indigo-3); --vp-c-brand-soft: var(--vp-c-indigo-soft); --vp-c-tip-1: var(--vp-c-brand-1); --vp-c-tip-2: var(--vp-c-brand-2); --vp-c-tip-3: var(--vp-c-brand-3); --vp-c-tip-soft: var(--vp-c-brand-soft); --vp-c-warning-1: var(--vp-c-yellow-1); --vp-c-warning-2: var(--vp-c-yellow-2); --vp-c-warning-3: var(--vp-c-yellow-3); --vp-c-warning-soft: var(--vp-c-yellow-soft); --vp-c-danger-1: var(--vp-c-red-1); --vp-c-danger-2: var(--vp-c-red-2); --vp-c-danger-3: var(--vp-c-red-3); --vp-c-danger-soft: var(--vp-c-red-soft); } /** * Component: Button * -------------------------------------------------------------------------- */ :root { --vp-button-brand-border: transparent; --vp-button-brand-text: var(--vp-c-white); --vp-button-brand-bg: var(--vp-c-brand-3); --vp-button-brand-hover-border: transparent; --vp-button-brand-hover-text: var(--vp-c-white); --vp-button-brand-hover-bg: var(--vp-c-brand-2); --vp-button-brand-active-border: transparent; --vp-button-brand-active-text: var(--vp-c-white); --vp-button-brand-active-bg: var(--vp-c-brand-1); } /** * Component: Home * -------------------------------------------------------------------------- */ :root { --vp-home-hero-name-color: transparent; --vp-home-hero-name-background: -webkit-linear-gradient( 120deg, #bd34fe 30%, #41d1ff ); --vp-home-hero-image-background-image: linear-gradient( -45deg, #bd34fe 50%, #47caff 50% ); --vp-home-hero-image-filter: blur(44px); } @media (min-width: 640px) { :root { --vp-home-hero-image-filter: blur(56px); } } @media (min-width: 960px) { :root { --vp-home-hero-image-filter: blur(68px); } } /** * Component: Custom Block * 
-------------------------------------------------------------------------- */ :root { --vp-custom-block-tip-border: transparent; --vp-custom-block-tip-text: var(--vp-c-text-1); --vp-custom-block-tip-bg: var(--vp-c-brand-soft); --vp-custom-block-tip-code-bg: var(--vp-c-brand-soft); } /** * Component: Algolia * -------------------------------------------------------------------------- */ .DocSearch { --docsearch-primary-color: var(--vp-c-brand-1) !important; } .badges { > p { display: flex; gap: 1rem; justify-content: center; align-items: center; } img, a { display: inline-flex; } } /** * Contrast fixes */ html { --vp-c-text-2: rgb(40, 40, 40); } html.dark { --vp-c-text-2: rgb(200, 200, 200); } html [class*='language-'] > span.lang { --vp-code-lang-color: rgb(40,40,40); } html.dark [class*='language-'] > span.lang { --vp-code-lang-color: rgb(200,200,200); } ================================================ FILE: docs-app/ember-try-and-ci.md ================================================ ### Ember Try & CI Integration Integrating ember-exam with [ember-try](https://github.com/ember-cli/ember-try) is remarkably easy. Define a [`command` in your `ember-try.js` config](https://github.com/ember-cli/ember-try#configuration-files) that leverages the `exam` command: ```js // config/ember-try.js module.exports = { command: 'ember exam --split 3 --parallel', // ... }; ``` Using [environmental variables](https://nodejs.org/api/process.html#process_process_env) gives you flexibility in how you run your tests. For instance, you could distribute your tests across processes instead of parallelizing them by specifying a `PARTITION` variable in your process environment and then consuming it like so: ```js module.exports = { command: 'ember exam --split 20 --partition ' + process.env.PARTITION, // ... }; ``` If you are working with [Travis CI](https://travis-ci.org/) then you can also easily set up seeded-random runs based on PR numbers. 
Similar to the following: ```js const command = ['ember', 'exam', '--random']; const pr = process.env.TRAVIS_PULL_REQUEST; if (pr) { command.push(pr); } module.exports = { command: command.join(' '), // ... }; ``` You can refer to [Travis' default environment variables](https://docs.travis-ci.com/user/environment-variables/#Default-Environment-Variables) to see what else you could possibly leverage for your test setup. ================================================ FILE: docs-app/filtering.md ================================================ ### Filtering Ember Exam provides options to filter test suites by two types - module path and test file path. ```bash $ ember exam --module-path= ``` #### For Vite Apps The `file-path` option allows you to filter modules by the given relative path that is generated from `import.meta.glob(...)` in your `tests/index.html`. ```bash # This will run tests that are defined in `/my-application/tests/unit/my-test.js` $ ember exam --file-path='/my-application/tests/unit/my-test.js' # This will run all test files that are under `/my-application/tests/unit/` $ ember exam --file-path='/my-application/tests/unit/*.js' ``` #### For non-Vite Apps The `module-path` option allows you to filter module paths by a given value. Module paths are mapped by test files and they are generated during `ember build`. After the build, `tests.js` file is created and it resides under [build-directory]/assets. The file is combined of all tests in an application and it has a form of `define("", others..`. The value for `module-path` can have either string or regular expression, for instance: ```bash # When module path value is string. This will run all modules which match with the passed value $ ember exam --module-path='dummy/tests/helpers/module-for-acceptance' # When module path value is regex. This will run all modules which have `dummy` in it $ ember exam --module-path='!/dummy/' ``` The `file-path` option is to filter tests by *test file path*. 
The test file path is a location of the test file in a file system. You can specify `file-path` to a location of specific test file path or you can use wildcards in paths to target multiple test files. ```bash # This will run tests that are defined in `/my-application/tests/unit/my-test.js` $ ember exam --file-path='/my-application/tests/unit/my-test.js' # This will run all test files that are under `/my-application/tests/unit/` $ ember exam --file-path='/my-application/tests/unit/*.js' ``` ================================================ FILE: docs-app/index.md ================================================ --- # https://vitepress.dev/reference/default-theme-home-page layout: home hero: name: "ember-exam" # text: "Run your tests with randomization, splitting, and parallelization for beautiful tests." tagline: "Run your tests with randomization, splitting, and parallelization for beautiful tests." actions: - theme: brand text: Quickstart link: /quickstart # - theme: alt # text: API Examples # link: /api-examples features: - title: Partitioning details: Specify the number of parallel browser instances to use to speed up your test suite. - title: Load Balancing details: Balance tests to maximize the effectivess of parallel browsers that would otherwise completely quickly due to happenstance of being given quickly running tests. - title: Randomization details: Find and eliminate brittle tests by changing the order of tests within the test suite. - title: Replay details: Record and replay test execution order for reliably reproducing potentially flaky behaviors. 
--- ![Build Status](https://github.com/ember-cli/ember-exam/actions/workflows/ci.yml/badge.svg?event=push) [![NPM Version](https://badge.fury.io/js/ember-exam.svg)][npm] [![Ember Observer Score](https://emberobserver.com/badges/ember-exam.svg)][score] [npm]: https://npmjs.com/package/ember-exam [score]: https://emberobserver.com/addons/ember-exam Ember Exam is an addon to allow you more control over how you run your tests when used in conjunction with [ember-qunit](https://github.com/emberjs/ember-qunit). It provides the ability to randomize, split, parallelize, and load-balance your test suite by adding a more robust CLI command. It started as a way to help reduce flaky tests and encourage healthy test driven development. [![Introduction to Ember Exam](https://cloud.githubusercontent.com/assets/2922250/22800360/157ad67c-eed7-11e6-8d33-d2c59238c7f1.png)](https://embermap.com/video/ember-exam) ================================================ FILE: docs-app/load-balancing.md ================================================ # Test Load Balancing ```bash ember exam --parallel= --load-balance ``` The `load-balance` option allows you to load balance test files against multiple browsers. It will order the test files by test types, e.g. acceptance | integration | unit, and load balance the ordered test files between the browsers dynamically rather than statically. **Note:** parallel must be used along with load-balance to specify a number of browser(s) The `load-balance` option was added to version 1.1 to address execution performance when running against a large test suite. Web browsers and the testem server communicate via promise in order to send and receive a test file. The promise timeout value is set to be 2 seconds, and the timeout can be customized by adding asyncTimeout=[timeout] as a querystring param in the test URL or adding to a testem config. 
For example, if you specify `load-balance` and `parallel` equals 3, then three browser instances will be created and the output will look something like: ```bash # ember exam --parallel=3 --load-balance ok 1 Chrome 66.0 - Browser Id 1 - some test ok 2 Chrome 66.0 - Browser Id 2 - some another test ok 3 Chrome 66.0 - Browser Id 3 - some the other test ``` You can also specify the `split` and `partition` options with `load-balance` to load a portion of test modules on multiple CI containers. ```bash ember exam --split= --partition= --parallel= --load-balance ``` This command will split test files and load-balance tests from the specified partition across the browsers. For example `ember exam --split=2 -partition=1 --parallel=3 --load-balance`, the complete list of test files are split into two halves. With the first half of the list load balanced against three browsers. The output will look something like below: ```bash # ember exam --split=2 --partition=1 --parallel=3 --load-balance ok 1 Chrome 66.0 - Exam Partition 1 - browser Id 1 - some test ok 2 Chrome 66.0 - Exam Partition 1 - browser Id 2 - another test ok 3 Chrome 66.0 - Exam Partition 1 - browser Id 3 - some the other test ``` **Important information on Load Balancing** 1. The `--load-balance` option is currently only supported in CI mode and for that reason no-launch cannot be used with load-balance. 2. You must be using `ember-cli` version 3.2.0 or greater for load balancing and test failure reproduction features to work properly. 3. You must be using `ember-qunit` version 4.1.1 or greater for this feature to work properly. 4. You must be using `qunit` version 2.13.0 or greater for this feature to work properly. ## Test Failure Reproduction Due to the dynamic nature of the load-balance option, test file execution order can vary between runs. 
In order to reproduce a past test execution, the execution must be recorded via passing --write-execution-file or --wef, which allows generating a JSON file that enables rerunning the past test execution. The option is only allowed when load-balance is passed. ```bash # The command will load in test balanced mode with of browser(s). After the test suite execution, it will generate a test-execution json file. ember exam --parallel= --load-balance --wef ember exam --parallel= --load-balance --write-execution-file ``` The file is stored in the root directory and the naming structure is `test-execution-.json`. To replay the test execution for particular browser(s), do the following: ```bash # The command will read a test execution file specified for `replay-execution` and execute a browser Id(s) from `replay-browser` ember exam --replay-execution=[string] --replay-browser=[num] ``` `replay-execution` allows you to specify a path to the json file to run execution against and `replay-browser` is to specify browser ID(s) to execute. ```bash # The command will read test-execution-000000.json and load the list of modules mapped to browserId 1 ember exam --replay-execution=test-execution-000000.json --replay-browser=1 ``` The above command will read `test-execution-000000.json` and load the list of modules which is mapped by browser ID #1. `replay-browser` can be an array of browser IDs. For instance `--replay-browser=1,2` will start two browsers and execute a list of modules which were previously run by browsers #1 and #2. ```bash # The command will read test-execution-000000.json and load the list of module mapped to browserId 1 and 2 ember exam --replay-execution=test-execution-000000.json --replay-browser=1,2 ``` When `replay-browser` value is not specified it will execute browserId(s) read from `failedBrowser` in the test execution file. 
```bash # The command will read test-execution-000000.json and load the list of modules mapped to browserIds from failedBrowser in the json file. ember exam --replay-execution=test-execution-000000.json ``` When `replay-browser` value is not specified and there is no value for `failedBrowser` in the json file it will rerun all list of modules. ```bash # The command will read test-execution-000000.json and load the list of module mapped to all browserIds when failedBrowser is none in the json file ember exam --replay-execution=test-execution-000000.json ``` **Important information on `--replay-execution` and `--replay-browser`** 1. You must be using `ember-cli` version 3.2.0 or greater for load-balnce and test failure reproduction features to work properly. 2. You must be using `ember-qunit` version 4.1.1 or greater for this feature to work properly. 3. You must be using `qunit` version 2.8.0 or greater for this feature to work properly. ================================================ FILE: docs-app/module-metadata.md ================================================ ### Generating Module Metadata File For Test Execution ```bash $ ember exam --write-module-metadata-file $ ember exam --wmmf ``` The `--write-module-metadata-file`, `wmmf` as an alias, allows you to generate a module metadata file after a test run. The module metadata file provides information about the test modules executed. 
It creates a json file, `module-metadata-.json`, which contains an array of elements representing metadata of modules executed by sorted by ascending order: ```json [ { "moduleName": "Module-name", "total": "Total number of tests in the module", "passed": "A number of passed tests in the module", "failed": "A number of failed tests in the module", "skipped": "A number of skipped tests in the module", "duration": "(ms) duration to execute all tests within the module", "failedTests": "A list of failed tests" } ] ``` and it looks something like below: ```json [ { "moduleName": "Slowest-module", "total": 12, "passed": 9, "failed": 1, "skipped": 2, "duration": 153, "failedTests": ["failed-test-1"] }, { "moduleName": "Fastest-module", "total": 2, "passed": 1, "failed": 0, "skipped": 0, "duration": 123, "failedTests": [] } ] ``` ================================================ FILE: docs-app/package.json ================================================ { "name": "docs", "private": true, "version": "1.0.0", "scripts": { "docs:dev": "vitepress dev .", "docs:build": "vitepress build .", "docs:preview": "vitepress preview ." 
}, "keywords": [], "author": "", "license": "MIT", "packageManager": "pnpm@10.33.0", "devDependencies": { "@algolia/client-search": "5.46.4", "search-insights": "2.17.3", "typescript": "5.9.3", "vite": "7.3.2", "vitepress": "1.6.4", "vitepress-plugin-llms": "1.10.0", "vue": "3.5.31" }, "dependencies": { "shiki": "^3.8.1" } } ================================================ FILE: docs-app/preserve-test-name.md ================================================ # Preserve Test Name When using `--split` and/or `--load-balance` the output will look something like: ```bash # ember exam --split=2 --partition=1 --parallel=3 --load-balance ok 1 Chrome 66.0 - Exam Partition 1 - browser Id 1 - some test ok 2 Chrome 66.0 - Exam Partition 1 - browser Id 2 - another test ok 3 Chrome 66.0 - Exam Partition 1 - browser Id 3 - some the other test ``` However, if you change the amount of parallelization, or randomize accross partitions, the output will change for the same test, which may be an issue if you are tracking test insights over time. ```bash # ember exam --split=2 --partition=1 --parallel=2 --load-balance ok 1 Chrome 66.0 - Exam Partition 1 - browser Id 2 - some test ok 2 Chrome 66.0 - Exam Partition 1 - browser Id 1 - another test ok 3 Chrome 66.0 - Exam Partition 1 - browser Id 2 - some the other test ``` You can add `--preserve-test-name` to remove the dynamic segments of the output (partition and browser) to ensure the output test names are always the same. 
```bash # ember exam --split=2 --partition=1 --parallel=3 --load-balance --preserve-test-name ok 1 Chrome 66.0 - some test ok 2 Chrome 66.0 - another test ok 3 Chrome 66.0 - some the other test ``` ================================================ FILE: docs-app/quickstart.md ================================================ # Quickstart ## Installation Installation is as easy as running: ```bash npm add --save-dev ember-exam ``` ## Usage Using Ember Exam is fairly straightforward as it extends directly from the default Ember-CLI `test` command. So, by default, it will work exactly the same as `ember test`. ```bash ember exam ember exam --filter='acceptance' ember exam --server ember exam --load-balance --parallel=1 ``` A value to an option can be passed with either `=` or a space. ```bash # A value of filter is acceptance ember exam --filter 'acceptance' # A value of parallel is 2 ember exam --load-balance --parallel=2 --server --no-launch # If a `=` is not used to pass a value to an option that requires a value, it will take anything passed after a space as it's value # In this instance, the value of parallel is --server ember exam --load-balance --parallel --server --no-launch ``` The idea is that you can replace `ember test` with `ember exam` and never look back. To get the unique features of Ember Exam (described in-depth below), you will need to **replace** the use of `start()` from `ember-qunit` in `test-helper.js` with `start()` from `ember-exam`: ## Setup ### With Vite Update your test-helper.js or test-helper.ts, to have add the ember-exam `start` function: ```diff // ... 
import { setApplication } from '@ember/test-helpers'; import { setup } from 'qunit-dom'; - import { start as qunitStart, setupEmberOnerrorValidation } from 'ember-qunit'; + import { setupEmberOnerrorValidation } from 'ember-qunit'; + import { start as startEmberExam } from 'ember-exam/test-support'; - export function start() { + export async function start({ availableModules }) { setApplication(Application.create(config.APP)); setup(QUnit.assert); setupEmberOnerrorValidation(); - qunitStart(); + // Options passed to `start` will be passed-through to ember-qunit + await startEmberExam({ availableModules }); } ``` Then, update your tests/index.html to pass availableModules to start: ```html ``` We need to tell vite to build the app before telling ember/exam to run tests on that output. Testing development: ```bash NODE_ENV=development vite build --mode development ember exam --path dist --config-file ./testem.cjs ``` Testing production: ```bash vite build --mode test ember exam --path dist --config-file ./testem.cjs ``` > [!NOTE] > Specifying the `--path` is important because otherwise ember-cli will try to build your vite app, and it will error. > [!NOTE] > Specifying the `--config-path` is important because ember-cli (what backs ember-exam) doesn't know about cjs files. 
### broccoli / ember-cli To get the unique features of Ember Exam (described in-depth below), you will need to **replace** the use of `start()` from `ember-qunit` in `test-helper.js` with `start()` from `ember-exam`: ```js // test-helper.js - import { start, setupEmberOnerrorValidation } from 'ember-qunit'; + import { setupEmberOnerrorValidation } from 'ember-qunit'; + import { start } from 'ember-exam/test-support'; // Options passed to `start` will be passed-through to ember-qunit start(); ``` ### Version < `3.0.0` Prior to `2.1.0`, Ember Exam must be loaded by importing `addon-test-support/load.js` and calling `loadEmberExam`: ```js // test-helper.js import loadEmberExam from 'ember-exam/test-support/load'; loadEmberExam(); ``` ================================================ FILE: docs-app/randomization-iterator.md ================================================ # Randomization Iterator Randomization can be helpful for identifying non-atomic or order-dependent tests. To that end, Ember Exam provides an iterator to make it easy to test lots of variations in your test suite order quickly. ```bash ember exam:iterate ``` This command will build your application once, and then run the test suite with the `random` option for the specified number of iterations. You can optionally skip the build by using a previous build via the `path` option: ```bash ember exam:iterate --path ``` Finally, you can pass additional options through to the exam command used to run the tests via the `options` flag: ```bash ember exam:iterate --options ``` The `options` should be a string matching what you would use via the CLI. ================================================ FILE: docs-app/randomization.md ================================================ # Randomization ```bash ember exam --random[=] ``` The `random` option allows you to randomize the order in which your tests run. You can optionally specify a "seed" value from which to randomize your tests in order to reproduce results. 
The seed can be any string value. Regardless of whether you specify a seed or not, Ember Exam will log the seed value used for the randomization at the beginning of the test run: ```bash ember exam --random Randomizing tests with seed: liv5d1ixkco6qlatl6o7mbo6r ember exam --random=this_is1337 Randomizing tests with seed: this_is1337 ``` If you use `random` without specifying a seed, it must be the last argument you pass. Otherwise, Ember Exam will attempt to interpret any following arguments as the seed value. In other words: ```bash # don't do this ember exam --random --split=2 Randomizing tests with seed: --split=2 # this is not what we wanted # do this instead ember exam --split=2 --random Randomizing tests with seed: hwr74nkk55vzpvi ``` _Note: You must be using QUnit version `1.23.0` or greater for this feature to work properly. ================================================ FILE: docs-app/split-parallel.md ================================================ # Split Test Parallelization ```bash ember exam --split= --parallel ``` The `parallel` option allows you to run your split tests across multiple test pages in parallel in [Testem](https://github.com/testem/testem). It will use a separate browser instance for each group of tests. So, if you specify a split of 3, then 3 browser instances will be spawned with the output looking something like: ```bash ok 1 PhantomJS 1.9 - Exam Partition 1 - some test ok 2 PhantomJS 1.9 - Exam Partition 3 - some other other test ok 3 PhantomJS 1.9 - Exam Partition 2 - some other test ``` You can also combine the `parallel` option with the `partition` option to split tests, and then recombine partitions into parallel runs. This would, for example, allow you to run tests in multiple CI containers and have each CI container parallelize its list of tests. 
For example, if you wanted to run your tests across two containers, but have one of them run twice as many tests as the other, and run them in parallel, you could do this: ```bash # container 1 ember exam --split=3 --partition=1,2 --parallel ``` ```bash # container 2 ember exam --split=3 --partition=3 --parallel ``` **Note 1**: _Ember Exam will respect the `parallel` setting of your [Testem config file](https://github.com/testem/testem/blob/master/docs/config_file.md#config-level-options) while running tests in parallel. The default value for `parallel` in Testem is 1, which means you'll need a non-default value to actually see parallel behavior._ **Note 2**: _Ember Exam sets `process.env.EMBER_EXAM_SPLIT_COUNT` for convenience. You can use this in your Testem file._ **Note 3**: _You must be using Testem version `1.5.0` or greater for this feature to work properly._ ================================================ FILE: docs-app/splitting.md ================================================ # Splitting ```bash ember exam --split= ``` The `split` option allows you to specify the number of partitions greater than one to spread your tests across. Ember Exam will then proceed to run the first batch of tests. ```bash ember exam --split= --partition= ``` The `partition` option allows you to specify which test group to run after using the `split` option. It is one-indexed, so if you specify a split of 3, the last group you could run is 3 as well. You can also run multiple partitions, e.g.: ```bash ember exam --split=4 --partition=1 --partition=2 ``` _Note: Ember Exam splits tests by modifying the ember-qunit's `TestLoader` to bucket each test file into a partition, where each partition has an even number of test files. This makes it possible to have unbalanced partitions. To run your tests with balanced partitions, consider using `--load-balance`. For more info, see [_Test Load Balancing_](#test-load-balancing). 
================================================ FILE: docs-app/test-suite-segmentation.md ================================================ # Test Suite Segmentation Some test suites like to segment which tests run based on various facets such as type of test, feature being tested, and so on. This can be accomplished by leveraging Testem's ability to have multiple test pages: ```json { "test_page": [ "tests/index.html?filter=acceptance", "tests/index.html?filter=!acceptance" ] } ``` You can use this feature in conjunction with Ember Exam's features, which will allow you to segment your test suite but still gain benefits from randomization and splitting. ================================================ FILE: docs-app/tsconfig.json ================================================ { "compilerOptions": { "module": "esnext", "target": "esnext", "moduleResolution": "bundler", "esModuleInterop": true, "strict": true, "skipLibCheck": true, "noUnusedLocals": true, "resolveJsonModule": true, "verbatimModuleSyntax": true, "jsx": "preserve", "lib": ["esnext", "dom", "dom.iterable"] }, "exclude": [ "**/node_modules/**", "**/dist/**", "template", "bin", "docs/snippets", "scripts" ] } ================================================ FILE: ember-cli-build.js ================================================ 'use strict'; const EmberAddon = require('ember-cli/lib/broccoli/ember-addon'); module.exports = function (defaults) { const self = defaults.project.findAddonByName('ember-exam'); const autoImport = self.options.autoImport; let app = new EmberAddon(defaults, { autoImport, babel: { plugins: [ // ... any other plugins require.resolve('ember-concurrency/async-arrow-task-transform'), // NOTE: put any code coverage plugins last, after the transform. 
], }, }); const { maybeEmbroider } = require('@embroider/test-setup'); return maybeEmbroider(app, {}); }; ================================================ FILE: eslint.config.mjs ================================================ import globals from "globals"; import { ember } from "ember-eslint"; import * as url from "url"; // Needed until Node 20 const dirname = url.fileURLToPath(new URL(".", import.meta.url)); export default [ ...ember.recommended(dirname), { name: "monorepo-root:ignores", ignores: [ "docs-app/**/*", "test-apps/**/*", "acceptance-dist/**/*", "failure-dist/**/*", "addon-test-support/index.d.ts", ], }, { name: "monorepo-root:lib", files: ["lib/**/*"], languageOptions: { globals: { ...globals.node, }, }, }, { name: "monorepo-root:node-tests", files: ["node-tests/**/*"], languageOptions: { globals: { ...globals.node, ...globals.mocha, }, }, rules: { "ember/no-test-support-import": "off", }, }, ]; ================================================ FILE: index.js ================================================ /* eslint-env node */ 'use strict'; module.exports = { name: require('./package').name, includedCommands() { return require('./lib/commands'); }, }; ================================================ FILE: lib/commands/exam/iterate.js ================================================ 'use strict'; module.exports = { name: 'exam:iterate', description: "Runs your app's test suite in a random order for a number of iterations with the 'exam' command.", works: 'insideProject', anonymousOptions: [''], availableOptions: [ { name: 'options', type: String, default: '', description: 'A string of options to passthrough to the exam command', }, { name: 'path', type: String, default: '', description: 'The output path of a previous build to run tests against', }, ], /** * The output directory of the build used to run the test iterations. * * @type {String} */ _outputDir: 'iteration-dist', /** * Runs `ember exam` with random seeds for a number of iterations. 
The results * of each run are displayed in a table at the end of the command. This is * useful for pre-emptively identifying flaky/non-atomic tests in an offline * job. * * @override */ async run(commandOptions, anonymousOptions) { const needsBuild = !commandOptions.path; if (needsBuild) { await this._buildForTests(); } else { this._outputDir = commandOptions.path; } const numIterations = parseInt(anonymousOptions[0], 10); const options = commandOptions.options; const results = await this._runIterations(numIterations, options); if (needsBuild) { await this._cleanupBuild(); } await this._write(results.toString(), true); }, /** * Writes out a line with a standard color, unless specifically turned off. * * @param {String} input * @param {Boolean} noColor */ async _write(input, noColor) { if (!noColor) { const chalk = (await import('chalk')).default; input = chalk.blue(input); } console.info(input); }, /** * Builds the application into a special output directory to run the tests * against repeatedly without rebuilding. */ async _buildForTests() { await this._write('\nBuilding app for test iterations.'); const { execa } = await import('execa'); await execa( './node_modules/.bin/ember', ['build', '--output-path', `${this._outputDir}`], { stdio: 'inherit' }, ); }, /** * Cleans up the build artifacts used for the test iterations. */ async _cleanupBuild() { await this._write('\nCleaning up test iterations.\n'); const { execa } = await import('execa'); await execa('rm', ['-rf', `${this._outputDir}`]); }, /** * Runs iterations of the test suite and returns a table to display the * results. 
* * @param {Number} numIterations * @param {String} options * @return {Table} results */ async _runIterations(numIterations, options) { const chalk = (await import('chalk')).default; const Table = require('cli-table3'); const results = new Table({ head: [ chalk.blue('Iteration'), chalk.blue('Seed'), chalk.blue('Exit Code'), chalk.blue('Command'), ], }); for (let i = 0; i < numIterations; i++) { await this._write('\nRunning iteration #' + (i + 1) + '.'); const result = await this._runTests(options); results.push([i].concat(result)); } await this._write('\nRan ' + numIterations + ' iterations.'); return results; }, /** * Runs the test suite in a random order while allowing additional options. * Returns an array representing a row in the result table for _runIterations. * * @param {String} options * @return {Array} results */ async _runTests(options) { const chalk = (await import('chalk')).default; const execSync = require('child_process').execSync; const seed = Math.random().toString(36).slice(2); const command = './node_modules/.bin/ember exam --random ' + seed + ' --path ' + this._outputDir + ' ' + options; let exitCode; try { execSync(command, { stdio: 'inherit' }); exitCode = 0; } catch (error) { await this._write('Returned non-zero exit code with error: ' + error); exitCode = 1; process.exitCode = 1; } const color = exitCode ? chalk.red : chalk.green; return [color(seed), color(exitCode), color(command)]; }, }; ================================================ FILE: lib/commands/exam.js ================================================ 'use strict'; const { addToQuery } = require('../utils/query-helper'); // npmlog is used to write to testem server logs and `--testem-debug` enables to save the log file. 
const log = require('npmlog');
const {
  combineOptionValueIntoArray,
  getBrowserId,
  getMultipleTestPages,
} = require('../utils/test-page-helper');
const TestemEvents = require('../utils/testem-events');
const TestCommand = require('ember-cli/lib/commands/test');
const TestServerTask = require('./task/test-server');
const TestTask = require('./task/test');

module.exports = TestCommand.extend({
  name: 'exam',
  description: `Runs your app's test suite with more options than 'test'.`,

  works: 'insideProject',

  // ember-exam specific flags, prepended to the stock `ember test` options.
  availableOptions: [
    {
      name: 'split',
      type: Number,
      description: 'A number of files to split your tests across.',
    },
    {
      name: 'partition',
      type: [Array, Number, String],
      description: 'The number of the partition(s) to run after splitting.',
    },
    {
      name: 'parallel',
      type: [Number, String],
      description: 'Runs your split tests on parallel child processes.',
    },
    {
      name: 'load-balance',
      type: Boolean,
      default: false,
      description:
        'Load balance test modules. Test modules will be sorted by weight from slowest (acceptance) to fastest (unit).',
    },
    {
      name: 'preserve-test-name',
      type: Boolean,
      default: false,
      aliases: ['ptn'],
      description:
        'Preserve the test name when using load balance or split by omitting the partition and browser numbers.',
    },
    {
      name: 'random',
      type: String,
      default: false,
      description:
        'Randomizes your modules and tests while running your test suite.',
    },
    {
      name: 'module-path',
      type: [String],
      aliases: ['mp'],
      description:
        'Filters the list of modules to only those that matches by module paths, the value accepts either string or regex.',
    },
    {
      name: 'file-path',
      type: [String],
      aliases: ['fp'],
      description:
        'Filters the list of modules to only those that matches by test file paths, the value accepts either string or regex.',
    },
    {
      name: 'replay-execution',
      type: String,
      default: false,
      aliases: ['re'],
      description:
        'A JSON file path which maps from browser id(s) to a list of modules',
    },
    {
      name: 'replay-browser',
      type: [Array, Number, String],
      aliases: ['rb'],
      description: 'The browser id(s) to replay from the replay-execution file',
    },
    {
      name: 'write-execution-file',
      type: Boolean,
      default: false,
      aliases: ['wef'],
      description:
        'Allows writing a test-execution json file after running your test suite',
    },
    {
      name: 'write-module-metadata-file',
      type: Boolean,
      default: false,
      aliases: ['wmmf'],
      description:
        'Allows writing a module metadata json file after running your test suite',
    },
  ].concat(TestCommand.prototype.availableOptions),

  init() {
    this._super(...arguments);

    // Swap in exam-aware task classes so testem receives the extra options.
    this.tasks.Test = TestTask;
    this.tasks.TestServer = TestServerTask;

    this.testemEvents = new TestemEvents(this.project.root);
    // Resolve the host app's ember-cli version from either dependency block.
    this.emberCliVersion =
      this.project.pkg.devDependencies['ember-cli'] ||
      this.project.pkg.dependencies['ember-cli'];
  },

  /**
   * Validates commandOptions
   *
   * @private
   * @param {Object} commandOptions
   * @return {Object} A map of what switches are enabled
   */
  _validateOptions(commandOptions) {
    const Validator = require('../utils/tests-options-validator');
    const validator = new Validator(commandOptions, this.emberCliVersion);
    return validator.validateCommands();
  },

  /**
   * Validates the command options and then runs the original test command.
*
 * @param {Object} commandOptions
 * @override
 */
run(commandOptions) {
  this.commands = this._validateOptions(commandOptions);

  // TODO: explore not mutating the commandOptions input
  if (commandOptions.split) {
    commandOptions.query = addToQuery(
      commandOptions.query,
      'split',
      commandOptions.split,
    );
    process.env.EMBER_EXAM_SPLIT_COUNT = commandOptions.split;

    // Ignore the partition option when paralleling (we'll fill it in later)
    if (!commandOptions.parallel && commandOptions.partition) {
      const partitions = combineOptionValueIntoArray(commandOptions.partition);
      for (let i = 0; i < partitions.length; i++) {
        commandOptions.query = addToQuery(
          commandOptions.query,
          'partition',
          partitions[i],
        );
      }
    }
  }

  if (commandOptions.modulePath) {
    commandOptions.query = addToQuery(
      commandOptions.query,
      'modulePath',
      commandOptions.modulePath,
    );
  }

  if (commandOptions.preserveTestName) {
    commandOptions.query = addToQuery(
      commandOptions.query,
      'preserveTestName',
      commandOptions.preserveTestName,
    );
  }

  if (commandOptions.filePath) {
    commandOptions.query = addToQuery(
      commandOptions.query,
      'filePath',
      commandOptions.filePath,
    );
  }

  if (commandOptions.loadBalance) {
    commandOptions.query = addToQuery(
      commandOptions.query,
      'loadBalance',
      commandOptions.loadBalance,
    );
  }

  if (commandOptions.replayBrowser) {
    // Normalize '1,3..5'-style values into a flat array of browser ids.
    commandOptions.replayBrowser = combineOptionValueIntoArray(
      commandOptions.replayBrowser,
    );
  }

  if (typeof commandOptions.random !== 'undefined') {
    commandOptions.query = this._randomize(
      commandOptions.random,
      commandOptions.query,
    );
  }

  return this._super.run.apply(this, arguments);
},

/**
 * Adds a `seed` param to the `query` to support randomization. Currently
 * only works with QUnit.
 *
 * @param {string} random
 * @param {string} query
 * @return {string}
 */
_randomize(random, query) {
  // Use the provided seed, or generate a fresh one when --random is passed
  // with no value.
  const seed = random !== '' ? random : Math.random().toString(36).slice(2);

  this.ui.writeLine('Randomizing tests with seed: ' + seed);

  return addToQuery(query, 'seed', seed);
},

/**
 * Customizes the Testem config to have multiple test pages if attempting to
 * run in parallel or load-balance. It is important that the user specifies
 * the number of launchers to run in parallel in their testem.json config.
 *
 * @param {Object} commandOptions
 * @override
 */
_generateCustomConfigs(commandOptions) {
  const config = this._super._generateCustomConfigs.apply(this, arguments);
  let additionalEvents = this._setupAndGetBrowserSocketEvents(config);

  if (commandOptions.loadBalance || commandOptions.replayExecution) {
    const loadBalancingEvents = this._getLoadBalancingBrowserSocketEvents(
      {
        isLoadBalance: this.commands.get('loadBalance'),
        isReplayExecution: this.commands.get('replayExecution'),
        isWriteExecutionFile: this.commands.get('writeExecutionFile'),
      },
      this.testemEvents,
    );
    additionalEvents = Object.assign(additionalEvents, loadBalancingEvents);
  }

  // Merge with any socket events the user already configured.
  config.custom_browser_socket_events = Object.assign(
    config.custom_browser_socket_events || {},
    additionalEvents,
  );

  // Only parallel/load-balance/replay runs need multiple test pages.
  if (
    !commandOptions.loadBalance &&
    !commandOptions.replayExecution &&
    !commandOptions.parallel
  )
    return config;

  config.testPage = getMultipleTestPages(config, commandOptions);

  if (commandOptions.replayExecution) {
    this.testemEvents.setReplayExecutionMap(
      commandOptions.replayExecution,
      commandOptions.replayBrowser,
    );
  }

  return config;
},

/**
 * Returns an event object to enable to send and receive module metadata
 *
 * @param {Object} config
 */
_setupAndGetBrowserSocketEvents(config) {
  const commands = this.commands;
  const testemEvents = this.testemEvents;
  const ui = this.ui;

  // Invoked (bound to testem's browser test runner) when a browser finishes.
  const browserExitHandler = function (failed = false) {
    const launcherId = this.launcher.id;
    if (!failed && commands.get('loadBalance')) {
      const browserId = getBrowserId(this.launcher);
      log.info(
        `Browser ${browserId} exiting. [ # of modules in current module queue ${
          testemEvents.stateManager.getTestModuleQueue().length
        } ]`,
      );

      // if getBrowserId cannot get the browserId
      // but the test queue is not empty, report the number of test modules left in the queue
      // otherwise, fail because testModuleQueue was not set
      if (browserId === 0) {
        if (testemEvents.stateManager.getTestModuleQueue() !== null) {
          ui.writeLine(
            `[ # of modules in current module queue ${
              testemEvents.stateManager.getTestModuleQueue().length
            } ]`,
          );
        } else {
          throw new Error('testModuleQueue is not set.');
        }
      }
    }

    // config.testPage is undefined when parallelization options are not used.
    // Default browserCount to 1 in that case.
    let browserCount = 1;

    // When using multiple browsers config.testPage is an array of test page urls.
    if (typeof config.testPage !== 'undefined') {
      browserCount = Object.keys(config.testPage).length;
    }

    testemEvents.completedBrowsersHandler(
      browserCount,
      launcherId,
      ui,
      commands,
      Date.now(),
    );
  };

  const browserTerminationHandler = function () {
    // browserTerminationHandler is called for disconnect, processError or processExit events.
    // disconnect and processExit events is fired during global error and successful test runs.
    // On successful test runs, browserExitHandler should already be called. And is unnecessary
    // to call it again, so we should return. This is covered by this.finish = true
    // On global failure cases, it's possible that this.finish is also true. So we must check
    // the timers set by onProcessExit
    // https://github.com/testem/testem/blob/master/lib/runners/browser_test_runner.js#L266
    // or onProcessError in testem.
// https://github.com/testem/testem/blob/master/lib/runners/browser_test_runner.js#L252 // If either timers is set, we should record the failed browser and call browserExitHandler if (this.finished && !this.onProcessExitTimer && !this.pendingTimer) { return; } if (commands.get('writeExecutionFile')) { testemEvents.recordFailedBrowserId(this.launcher, ui); } browserExitHandler.call(this, true); }; return this._getModuleMetadataAndBrowserExitSocketEvents( browserExitHandler, browserTerminationHandler, ); }, /** * Add browser socket events are needed for both with load-balance and without load-balance * * @param {Object} browserExitHandler * @param {Object} browserTerminationHandler */ _getModuleMetadataAndBrowserExitSocketEvents( browserExitHandler, browserTerminationHandler, ) { const events = {}; const testemEvents = this.testemEvents; let init = false; events['tests-start'] = function () { if (!init) { // process object is instantiated only when browsers are launched by testem server. // 1. `ember test/exam` where browsers are instantiated by testem - process is available // 2. `ember test/exam --server` where browsers can be instantiated by testem or manually // - process is available only when browsers are instantiated by testem // 3. `ember test/exam --serve --no-launch` where browsers are instantiated manually - process is undefined // 4. `ember serve` where browsers are instantiated manually by developer - process is available. 
if (typeof this.process !== 'undefined' && this.process !== null) {
        this.process.on('processExit', browserTerminationHandler.bind(this));
        this.process.on('processError', browserTerminationHandler.bind(this));
      }
      init = true;
    }
    if (typeof this.launcher !== 'undefined' && this.launcher !== null) {
      // Remember which launchers actually started; used to decide when all
      // browsers have completed.
      testemEvents.recordStartedLauncherId(this.launcher.id);
    }
  };

  events['after-tests-complete'] = browserExitHandler;

  events['disconnect'] = function () {
    // Register `disconnect` so browser disconnects caused by errors are also
    // routed through the termination handler.
    browserTerminationHandler.bind(this)();
  };

  events['testem:test-done-metadata'] = (details) => {
    // Ensure module detail is available
    if (typeof details === 'object' && details !== null) {
      // store module name, test name, # of failed assertions, and duration.
      this.testemEvents.recordModuleMetadata({
        moduleName: details.module,
        testName: details.name,
        // NOTE(review): loose equality (==) retained from the original —
        // confirm `passed`/`total` are always the same numeric type.
        passed: details.passed == details.total,
        failed: details.failed > 0,
        skipped: details.skipped,
        duration: details.runtime,
      });
    }
  };

  return events;
},

/**
 * Return an event object which enables load balancing.
* These event handlers will be registered on Testem's browserTestRunner socket instance
 *
 * @param {Object} commands
 * @param {Object} testemEvents
 */
_getLoadBalancingBrowserSocketEvents(
  { isLoadBalance, isReplayExecution, isWriteExecutionFile },
  testemEvents,
) {
  const events = {};
  const ui = this.ui;

  // Browser announces the full module list; seeds the shared queue once.
  events['testem:set-modules-queue'] = function (modules, browserId) {
    testemEvents.setModuleQueue(
      browserId,
      modules,
      isLoadBalance,
      isReplayExecution,
    );
  };

  // Browser asks for its next module to execute.
  events['testem:next-module-request'] = function (browserId) {
    testemEvents.nextModuleResponse(
      browserId,
      this.socket,
      isWriteExecutionFile,
    );
  };

  events['test-result'] = function (result) {
    if (result.failed && isWriteExecutionFile) {
      testemEvents.recordFailedBrowserId(this.launcher, ui);
    }
  };

  return events;
},
});


================================================
FILE: lib/commands/index.js
================================================
'use strict';

module.exports = {
  exam: require('./exam'),
  'exam:iterate': require('./exam/iterate'),
};


================================================
FILE: lib/commands/task/test-server.js
================================================
const TestServerTask = require('ember-cli/lib/tasks/test-server');

module.exports = TestServerTask.extend({
  // Forward ember-exam's extra options through to the testem server config.
  transformOptions(options) {
    const transformOptions = this._super(...arguments);

    transformOptions.custom_browser_socket_events =
      options.custom_browser_socket_events;
    transformOptions.browser_module_mapping = options.browser_module_mapping;

    return transformOptions;
  },
});


================================================
FILE: lib/commands/task/test.js
================================================
const TestTask = require('ember-cli/lib/tasks/test');

module.exports = TestTask.extend({
  // Forward ember-exam's extra options through to the testem config.
  transformOptions(options) {
    const transformOptions = this._super(...arguments);

    transformOptions.custom_browser_socket_events =
      options.custom_browser_socket_events;
    transformOptions.browser_module_mapping = options.browser_module_mapping;

    if (options.loadBalance) {
      /**
       * the parallel option is how testem knows to boot browsers simultaneously.
       * setting testPage to an array isn't enough.
       * default behavior is 1 browser at a time, which defeats the purpose of loadBalance.
       */
      transformOptions.parallel = options.testPage.length;
    }

    return transformOptions;
  },
});


================================================
FILE: lib/utils/config-reader.js
================================================
'use strict';

const fs = require('fs-extra');
const yaml = require('js-yaml');
const path = require('path');
const debug = require('debug')('exam:config-reader');

const potentialConfigFiles = ['testem.js', 'testem.json', 'testem.cjs'];

/**
 * Given an array of file paths, returns the first one that exists and is
 * accessible. Paths are relative to the process' cwd.
 *
 * @param {Array} files
 * @return {string} file
 */
function _findValidFile(files) {
  for (let i = 0; i < files.length; i++) {
    // TODO: investigate this cwd() usually they are in-error...
    const file = path.join(process.cwd(), files[i]);
    try {
      fs.accessSync(file, fs.F_OK);
      return file;
    } catch (error) {
      debug(`Failed to find ${file} due to error: ${error}`);
      continue;
    }
  }
}

/**
 * Reads in a given file according to its 'type' as determined by file
 * extension. Supported types are `js`, `cjs`, `json` and `yaml`.
 *
 * @param {string} file
 * @return {Object} fileContents
 */
function _readFileByType(file) {
  if (typeof file === 'string') {
    const fileType = file.split('.').pop();
    switch (fileType) {
      case 'js':
      case 'cjs':
        return require(file);
      case 'json':
        return fs.readJsonSync(file);
      case 'yaml':
        return yaml.load(fs.readFileSync(file));
      default:
        throw new Error(`Unrecognized file extension for: ${file}`);
    }
  }
}

/**
 * Gets the application's testem config by trying a custom file first and then
 * defaulting to either `testem.js` or `testem.json`.
*
 * @param {string} file
 * @param {Array} potentialFiles
 * @return {Object} config
 */
module.exports = function readTestemConfig(
  file,
  potentialFiles = potentialConfigFiles,
) {
  if (file) {
    // Custom file takes priority over the default candidates.
    potentialFiles.unshift(file);
  }
  const configFile = _findValidFile(potentialFiles);
  return configFile && _readFileByType(configFile);
};


================================================
FILE: lib/utils/execution-state-manager.js
================================================
'use strict';

/**
 * A class to store the state of an execution.
 *
 * @class ExecutionStateManager
 */
class ExecutionStateManager {
  constructor(replayExecutionMap) {
    // A set of launcher ids of attached browsers
    this._startedLaunchers = new Set();

    // A map of browserId to test modules executed on that browser read from test-execution.json.
    this._replayExecutionMap = replayExecutionMap || null;

    // A map of browserId to test modules executed for the current test execution.
    this._browserToModuleMap = new Map();

    // A map keeping the module execution details
    this._moduleMetadata = new Map();

    // An array keeping the browserId of a browser with failing test
    this._failedBrowsers = [];

    // A map of launcherId -> true for browsers that have finished.
    this._completedBrowsers = new Map();

    // An array of modules to load balance against browsers. This is used by `--load-balance`
    this._testModuleQueue = null;

    // A map of browserId to an array of test modules. This is used by `--replay-execution`
    this._replayExecutionModuleQueue = null;
  }

  /**
   * Returns the startedLaunchers
   *
   * @returns {Set}
   */
  getStartedLaunchers() {
    return this._startedLaunchers;
  }

  /**
   * Add a new launcher id to the startedLaunchers set.
   *
   * @param {number} launcherId
   * @returns {Set} the set, for chaining (Set.prototype.add returns the Set)
   */
  addToStartedLaunchers(launcherId) {
    return this._startedLaunchers.add(launcherId);
  }

  /**
   * Returns the replayExecutionMap
   *
   * @returns {Object}
   */
  getReplayExecutionMap() {
    return this._replayExecutionMap;
  }

  /**
   * Sets the replayExecutionMap
   *
   * @param {Object} replayModuleMap
   */
  setReplayExecutionMap(replayModuleMap) {
    this._replayExecutionMap = replayModuleMap;
  }

  /**
   * Returns the testModuleQueue (null until setTestModuleQueue is called)
   *
   * @returns {Array|null}
   */
  getTestModuleQueue() {
    return this._testModuleQueue;
  }

  /**
   * Sets the shared module queue.
   *
   * @param {Object} moduleQueue
   */
  setTestModuleQueue(moduleQueue) {
    this._testModuleQueue = moduleQueue;
  }

  /**
   * Gets the next module from the shared module queue
   *
   * @returns {string|null}
   */
  getNextModuleTestModuleQueue() {
    if (this._testModuleQueue) {
      return this._testModuleQueue.shift();
    }
    return null;
  }

  /**
   * Returns the array of modules belonging to browserId
   *
   * @param {number} browserId
   * @returns {Array|null}
   */
  getReplayExecutionModuleQueue(browserId) {
    if (this._replayExecutionModuleQueue) {
      return this._replayExecutionModuleQueue.get(browserId);
    }
    return null;
  }

  /**
   * Sets the array of modules in browser module queue for browserId
   *
   * @param {Array} moduleQueue
   * @param {number} browserId
   */
  setReplayExecutionModuleQueue(moduleQueue, browserId) {
    if (!this._replayExecutionModuleQueue) {
      this._replayExecutionModuleQueue = new Map();
    }
    // Store a copy so subsequent shift() calls do not mutate the caller's array.
    this._replayExecutionModuleQueue.set(browserId, moduleQueue.slice());
  }

  /**
   * Gets the next module from the module array of browserId
   *
   * @param {number} browserId
   * @returns {string|null}
   */
  getNextModuleReplayExecutionModuleQueue(browserId) {
    if (
      this._replayExecutionModuleQueue &&
      this._replayExecutionModuleQueue.get(browserId)
    ) {
      return this._replayExecutionModuleQueue.get(browserId).shift();
    }
    return null;
  }

  /**
   * Returns the failedBrowsers array.
   * (The original docstring said "Returns the TestModuleQueue" — copy/paste error.)
   *
   * @returns {Array}
   */
  getFailedBrowsers() {
    return this._failedBrowsers;
  }

  /**
   * Returns whether or not the browserId is contained in the failedBrowsers array.
   *
   * @param {number} browserId
   * @returns {Boolean}
   */
  containsFailedBrowser(browserId) {
    return this._failedBrowsers.includes(browserId);
  }

  /**
   * Add a new browserId to the failedBrowsers array.
   *
   * @param {number} browserId
   * @returns {number} the new length of the array (Array.prototype.push)
   */
  addFailedBrowsers(browserId) {
    return this._failedBrowsers.push(browserId);
  }

  /**
   * Returns the map of browserId to modules array
   *
   * @returns {Map}
   */
  getModuleMap() {
    return this._browserToModuleMap;
  }

  /**
   * Returns module run details keyed by module name
   *
   * @returns {Map}
   */
  getModuleMetadata() {
    return this._moduleMetadata;
  }

  /**
   * Pushes the moduleName into the moduleArray of browserId
   *
   * @param {string} moduleName
   * @param {number} browserId
   */
  addModuleNameToReplayExecutionMap(moduleName, browserId) {
    let browserModuleList = this._browserToModuleMap.get(browserId);

    if (Array.isArray(browserModuleList)) {
      browserModuleList.push(moduleName);
    } else {
      // First module recorded for this browser.
      browserModuleList = [moduleName];
    }
    this._browserToModuleMap.set(browserId, browserModuleList);
  }

  /**
   * Add module metadata mapped by moduleName to moduleMetadata Map.
*
 * @param {string} moduleName
 * @param {number} total - Total number of tests
 * @param {number} passed - Number of passed tests
 * @param {number} failed - Number of failed tests
 * @param {number} skipped - Number of skipped tests
 * @param {number} duration - duration to execute tests in module in ms
 * @param {Array} failedTests - A list of failed test names
 */
_injectModuleMetadata(
  moduleName,
  total,
  passed,
  failed,
  skipped,
  duration,
  failedTests,
) {
  this._moduleMetadata.set(moduleName, {
    moduleName,
    total,
    passed,
    failed,
    skipped,
    duration,
    failedTests,
  });
}

/**
 * Accumulate one test's result into the per-module metadata entry.
 *
 * @param {Object} metadata
 */
addToModuleMetadata(metadata) {
  if (!this._moduleMetadata.has(metadata.moduleName)) {
    // modulename, total, passed, failed, skipped, duration, failed tests
    this._injectModuleMetadata(metadata.moduleName, 0, 0, 0, 0, 0, []);
  }
  const curModuleMetadata = this._moduleMetadata.get(metadata.moduleName);

  if (!metadata.skipped && metadata.failed) {
    curModuleMetadata.failedTests.push(metadata.testName);
  }

  // Re-inject with the counters for this test folded in; skipped tests do
  // not count toward passed/failed.
  this._injectModuleMetadata(
    metadata.moduleName,
    curModuleMetadata.total + 1,
    !metadata.skipped && metadata.passed
      ? curModuleMetadata.passed + 1
      : curModuleMetadata.passed,
    !metadata.skipped && metadata.failed
      ? curModuleMetadata.failed + 1
      : curModuleMetadata.failed,
    metadata.skipped ? curModuleMetadata.skipped + 1 : curModuleMetadata.skipped,
    curModuleMetadata.duration + metadata.duration,
    curModuleMetadata.failedTests,
  );
}

/**
 * Returns the number of completed browsers
 *
 * @returns {number}
 */
getCompletedBrowser() {
  return this._completedBrowsers.size;
}

/**
 * Book keep the browser id that has completed
 *
 * @param {number} browserId
 */
incrementCompletedBrowsers(browserId) {
  this._completedBrowsers.set(browserId, true);
}
}

module.exports = ExecutionStateManager;


================================================
FILE: lib/utils/file-system-helper.js
================================================
const fs = require('fs-extra');

/**
 * Creates a file with targetJsonObject
 *
 * @param {string} fileName
 * @param {Object} targetJsonObject
 * @param {Object} option
 */
module.exports = function writeJsonToFile(
  fileName,
  targetJsonObject,
  option = {},
) {
  try {
    fs.writeJsonSync(fileName, targetJsonObject, option);
  } catch (err) {
    // Attach the file name so callers can report which write failed.
    if (typeof err === 'object' && err !== null) {
      err.file = err.file || fileName;
    }
    throw err;
  }
};


================================================
FILE: lib/utils/query-helper.js
================================================
'use strict';

/**
 * Creates a valid query string by appending a given param and value to query.
 *
 * @param {string} query
 * @param {string} param
 * @param {string} value
 */
function addToQuery(query, param, value) {
  // Falsy values (undefined, false, '', 0) leave the query untouched.
  if (!value) {
    return query;
  }
  const queryAddParam = query ? query + '&' + param : param;

  // A boolean `true` renders as a bare flag with no `=value`.
  return value !== true ? queryAddParam + '=' + value : queryAddParam;
}

/**
 * Adds a valid query string to a given url.
 *
 * @param {string} url
 * @param {string} param
 * @param {string} value
 */
function addToUrl(url, param, value) {
  const urlParts = url.split('?');
  const base = urlParts[0];
  const query = urlParts[1];

  return base + '?' + addToQuery(query, param, value);
}

module.exports = {
  addToQuery,
  addToUrl,
};


================================================
FILE: lib/utils/test-page-helper.js
================================================
'use strict';

const fs = require('fs-extra');
const readTestemConfig = require('../utils/config-reader');
const { addToUrl } = require('./query-helper');

/**
 * Add parameters such as split, loadBalance or partition to a base url if options are valid.
 *
 * @param {Object} commandOptions
 * @param {string} baseUrl
 * @return {string} baseUrl
 */
function _appendParamToBaseUrl(commandOptions, baseUrl) {
  if (commandOptions.parallel || commandOptions.split) {
    baseUrl = addToUrl(baseUrl, 'split', commandOptions.split);
  }

  // `loadBalance` is added to url when running replay-execution in order to emit `set-module-queue` in patch-test-loader.
  if (commandOptions.loadBalance || commandOptions.replayExecution) {
    const partitions = commandOptions.partition;
    baseUrl = addToUrl(baseUrl, 'loadBalance', true);

    if (partitions) {
      for (let i = 0; i < partitions.length; i++) {
        baseUrl = addToUrl(baseUrl, 'partition', partitions[i]);
      }
    }
  }
  return baseUrl;
}

/**
 * Generates an array by parsing optionValue. optionValue can be in a string form of '1,2', '3..5'
 * or '1,3..5' where '3..5' indicates a number sequence starting from 3 to 5.
 *
 * @param {string} optionValue
 * @return {Array}
 */
function _formatStringOptionValue(optionValue) {
  let valueArray = [];

  optionValue.split(',').forEach(function (val) {
    if (val.indexOf('..') > 0) {
      const arr = val.split('..');
      const filledArray = _getFilledArray(arr.shift(), arr.pop());
      valueArray = valueArray.concat(filledArray);
    } else {
      valueArray.push(val);
    }
  });

  return valueArray;
}

/**
 * Generates multiple test pages: for a given baseUrl, it appends the partition numbers
 * or the browserId each page is running as query params.
* * @param {string} customBaseUrl * @param {string} appendingParam * @param {Array} testPages */ function _generateTestPages(customBaseUrl, appendingParam, browserIds) { const testPages = []; for (let i = 0; i < browserIds.length; i++) { const url = addToUrl(customBaseUrl, appendingParam, browserIds[i]); testPages.push(url); } return testPages; } /** * Creates an array of numbers between the range of start to end. * * @param {number} start * @param {number} end * @return {Array} */ function _getFilledArray(start, end) { const length = end - start + 1; return Array.from({ length }, (_, i) => i + Number(start)); } /** * returns the failed browsers from the test execution json defined in executionJsonPath * other wise return an array of 1 to number of browsers spawned during the execution * * @param {string} executionJsonPath * @return {Array} testPages */ function _getReplayBrowsers(executionJsonPath) { const executionJson = fs.readJsonSync(executionJsonPath); if (executionJson.failedBrowsers.length > 0) { return executionJson.failedBrowsers; } return _getFilledArray(1, executionJson.numberOfBrowsers); } /** * Returns an array populated with numeric values represented by the optionValue. * e.g. 
[1, '2,3'] => [1, 2, 3], [1, '3..6'] => [1, 3, 4, 5, 6] * * @param {*} optionValue * @return {Array { if (typeof element === 'string') { return result.concat(_formatStringOptionValue(element)); } return result.concat(element); }, []); } /** * Returns the browserId of launcher * * @param {Object} launcher * @return {string} */ function getBrowserId(launcher) { try { const testPage = launcher.settings.test_page; const browserIdMatch = /browser=\s*([0-9]*)/.exec(testPage); if (Array.isArray(browserIdMatch) !== null && browserIdMatch !== null) { return browserIdMatch[1]; } } catch (err) { const errMsg = `${err.message} \n${ err.stack } \nLauncher Settings: ${JSON.stringify(launcher.settings, null, 2)}`; console.warn(errMsg); } return 0; } /** * Gets a test url in testem config to modify the url in order to generate multiple test pages * * @param {Object} configFile * @return {string} testPage */ function getTestUrlFromTestemConfig(configFile) { // Attempt to read in the testem config and use the test_page definition const testemConfig = readTestemConfig(configFile); let testPage = testemConfig && testemConfig.test_page; // If there is no test_page to use as the testPage, we warn that we're using // a default value if (!testPage) { console.warn( 'No test_page value found in the config. 
Defaulting to "tests/index.html?hidepassed"', ); testPage = 'tests/index.html?hidepassed'; } // Get the testPage from the generated config or the Testem config and // use it as the baseUrl to customize for the parallelized test pages or test load balancing return testPage; } /** * Creates an array of custom base urls by appending options that are specified * * @param {Object} commandOptions * @param {*} baseUrl * @return {string} */ function getCustomBaseUrl(commandOptions, baseUrl) { if (Array.isArray(baseUrl)) { return baseUrl.map((currentUrl) => { return _appendParamToBaseUrl(commandOptions, currentUrl); }); } else { return _appendParamToBaseUrl(commandOptions, baseUrl); } } /** * Ember-exam allows serving multiple browsers to run test suite. In order to acheive that test_page in testem config * has to be set with an array of multiple urls reflecting to command passed. * * @param {Object} config * @param {Object} commandOptions * @return {Array} testPages */ function getMultipleTestPages(config, commandOptions) { let testPages = Object.create(null); let browserIds = combineOptionValueIntoArray(commandOptions.partition); let appendingParam = 'partition'; if (commandOptions.loadBalance) { appendingParam = 'browser'; browserIds = _getFilledArray(1, commandOptions.parallel); } else if (commandOptions.parallel === 1 && browserIds.length === 0) { browserIds = _getFilledArray(1, commandOptions.split); } else if (commandOptions.replayExecution) { appendingParam = 'browser'; browserIds = combineOptionValueIntoArray(commandOptions.replayBrowser); if (browserIds.length === 0) { browserIds = _getReplayBrowsers(commandOptions.replayExecution); } } const baseUrl = config.testPage || getTestUrlFromTestemConfig(commandOptions.configFile); const customBaseUrl = getCustomBaseUrl(commandOptions, baseUrl); if (Array.isArray(customBaseUrl)) { testPages = customBaseUrl.reduce(function (testPages, customBaseUrl) { return testPages.concat( _generateTestPages(customBaseUrl, 
appendingParam, browserIds), ); }, []); } else { testPages = _generateTestPages(customBaseUrl, appendingParam, browserIds); } return testPages; } module.exports = { combineOptionValueIntoArray, getBrowserId, getCustomBaseUrl, getMultipleTestPages, getTestUrlFromTestemConfig, }; ================================================ FILE: lib/utils/testem-events.js ================================================ 'use strict'; const fs = require('fs-extra'); const path = require('path'); const ExecutionStateManager = require('./execution-state-manager'); const { getBrowserId } = require('../utils/test-page-helper'); const writeJsonToFile = require('./file-system-helper'); /** * Return sorted module metadata object by module duration. * * @param {Map} moduleMetadata */ function getSortedModuleMetaData(moduleMetadata) { return new Map( [...moduleMetadata.entries()].sort((a, b) => b[1].duration - a[1].duration), ); } /** * A class to coordinate testem events to enable load-balance functionality. * * @class TestemEvents */ class TestemEvents { constructor(root) { this.stateManager = new ExecutionStateManager(); this.root = root; } /** * Read the executionFilePath then: * if failed browsers are available, set the module map to the modules from the failed browsers. 
* else if replay-browser param is passed, set the module map specified browser id * else set module map to all the browser ran, effectively rerunning the same execution * * @param {string} executionFilePath * @param {Array} browserIdsToReplay * @return {Object} */ setReplayExecutionMap(executionFilePath, browserIdsToReplay) { const browserModuleMap = new Map(); let executionJson; try { executionJson = fs.readJsonSync(executionFilePath); } catch (err) { throw new Error(`Error reading reply execution JSON file - ${err}`); } if (browserIdsToReplay && browserIdsToReplay.length > 0) { browserIdsToReplay.forEach((browserId) => { browserModuleMap.set( browserId.toString(), executionJson.executionMapping[browserId.toString()], ); }); } else if (executionJson.failedBrowsers.length > 0) { executionJson.failedBrowsers.forEach((browserId) => { browserModuleMap.set( browserId, executionJson.executionMapping[browserId], ); }); } else { for ( let browserId = 1; browserId <= executionJson.numberOfBrowsers; browserId++ ) { browserModuleMap.set( browserId.toString(), executionJson.executionMapping[browserId.toString()], ); } } this.stateManager.setReplayExecutionMap(browserModuleMap); } /** * Set the moduleQueue, a list of test modules to be passed to browsers to execute. 
* * @param {number} browserId * @param {Array} modules * @param {boolean} loadBalance * @param {boolean} replayExecution * @param {string} writeExecutionFile */ setModuleQueue(browserId, modules, loadBalance, replayExecution) { const replayExecutionMap = this.stateManager.getReplayExecutionMap(); if (replayExecution) { if (!replayExecutionMap) { throw new Error('No replay execution map was set on the stateManager.'); } else if (!this.stateManager.getReplayExecutionModuleQueue(browserId)) { // Only set the moduleQueue once, ignore repeated requests this.stateManager.setReplayExecutionModuleQueue( replayExecutionMap.get(browserId), browserId, ); } } else if (loadBalance && !this.stateManager.getTestModuleQueue()) { // Only set the moduleQueue once, ignore repeated requests this.stateManager.setTestModuleQueue(modules); } } /** * Gets the next test module from the moduleQueue and emit back to the browser. * If moduleQueue is already empty, emit the module-queue-complete event, * signaling no more test module to run. * * @param {number} browserId * @param {Object} socket * @param {boolean} loadBalance * @param {boolean} writeExecutionFile */ nextModuleResponse(browserId, socket, writeExecutionFile) { const moduleQueue = this.stateManager.getTestModuleQueue() || this.stateManager.getReplayExecutionModuleQueue(browserId); if (!moduleQueue) { throw new Error('No moduleQueue was set.'); } const moduleName = moduleQueue.shift(); socket.emit('testem:next-module-response', { done: !moduleQueue.length && !moduleName, value: moduleName, }); // Keep track of the modules executed per browserId when running test suite with load-balance. 
// In replay-execution mode, we are already running a predefined set of modules, so no need // to save this again if (moduleName && writeExecutionFile) { this.stateManager.addModuleNameToReplayExecutionMap( moduleName, browserId, ); } } /** * Record the launched browser id * * @param {number} browserId */ recordStartedLauncherId(browserId) { this.stateManager.addToStartedLaunchers(browserId); } /** * Record the module run details to the stateManager * * @param {Object} metaData */ recordModuleMetadata(metaData) { this.stateManager.addToModuleMetadata(metaData); } /** * Gets browser id of launcher and stores the browser id stateManager * * @param {Object} launcher * @param {Object} ui */ recordFailedBrowserId(launcher, ui) { let browserId; try { browserId = getBrowserId(launcher); } catch (err) { ui.writeLine(err.message); } if ( (browserId !== null || typeof browserId !== 'undefined') && !this.stateManager.containsFailedBrowser(browserId) ) { this.stateManager.addFailedBrowsers(browserId); } } /** * Generates an object for test execution * * @param {number} browserCount */ _generatesModuleMapJsonObject(browserCount) { return { numberOfBrowsers: browserCount, failedBrowsers: this.stateManager.getFailedBrowsers(), executionMapping: (() => { let executionMapping = Object.create(null); for (const [ browserId, moduleList, ] of this.stateManager.getModuleMap()) { executionMapping[browserId] = moduleList; } return executionMapping; })(), }; } /** * Keep track of the number of browsers that completed executing its tests. 
* When all browsers complete, write test-execution.json to disk and clean up the stateManager * * @param {number} browserCount * @param {number} launcherId * @param {Object} ui * @param {Object} commands * @param {Object} currentDate */ completedBrowsersHandler( browserCount, launcherId, ui, commands, currentDate, ) { const browsersStarted = this.stateManager.getStartedLaunchers(); let browsersCompleted = false; this.stateManager.incrementCompletedBrowsers(launcherId); const completedBrowser = this.stateManager.getCompletedBrowser(); if (completedBrowser === browsersStarted.size) { if (commands.get('writeModuleMetadataFile')) { const moduleDetailFileName = path.join( this.root, `module-metadata-${currentDate}.json`, ); const sortedModuleMetadata = getSortedModuleMetaData( this.stateManager.getModuleMetadata(), ); writeJsonToFile( moduleDetailFileName, { requested: `${browserCount} browser(s)`, launched: `${browsersStarted.size} browser(s)`, modules: Array.from(sortedModuleMetadata.values()), }, { spaces: 2 }, ); ui.writeLine( `\nExecution module details were recorded at ${moduleDetailFileName}`, ); } if (commands.get('writeExecutionFile') && commands.get('loadBalance')) { const moduleMapJson = this._generatesModuleMapJsonObject(browserCount); const testExecutionPath = path.join( this.root, `test-execution-${currentDate}.json`, ); writeJsonToFile(testExecutionPath, moduleMapJson, { spaces: 2 }); ui.writeLine(`\nExecution was recorded at ${testExecutionPath}`); } ui.writeLine( `Out of requested ${browserCount} browser(s), ${browsersStarted.size} browser(s) was launched & completed.`, ); if (browserCount !== browsersStarted.size) { ui.writeLine('Waiting for remaining browsers to exited.'); } } if (completedBrowser === browserCount) { ui.writeLine('All browsers to exited.'); // --server mode allows rerun of tests by refreshing the browser // replayExecutionMap should be reused so the test-execution json // does not need to be reread const replayExecutionMap = 
this.stateManager.getReplayExecutionMap(); this.stateManager = new ExecutionStateManager(replayExecutionMap); browsersCompleted = true; } return browsersCompleted; } } module.exports = TestemEvents; ================================================ FILE: lib/utils/tests-options-validator.js ================================================ 'use strict'; const fs = require('fs-extra'); const SilentError = require('silent-error'); const semver = require('semver'); /** * Validates the specified partitions * * @private * @param {Array} partitions * @param {Number} split */ function validatePartitions(partitions, split) { validatePartitionSplit(partitions, split); validateElementsUnique(partitions, 'partition'); } /** * Returns thr number of browsers defined within the test execution file. * * @param {*} fileName */ function getNumberOfBrowser(fileName) { const executionJson = fs.readJsonSync(fileName); return executionJson.numberOfBrowsers; } /** * Validates the specified replay-browser * * @param {String} replayExecution * @param {Array} replayBrowser */ function validateReplayBrowser(replayExecution, replayBrowser) { if (!replayExecution) { throw new SilentError( 'EmberExam: You must specify replay-execution when using the replay-browser option.', ); } const numberOfBrowsers = getNumberOfBrowser(replayExecution); for (const i in replayBrowser) { const browserId = replayBrowser[i]; if (browserId < 1) { throw new SilentError( 'EmberExam: You must specify replay-browser values greater than or equal to 1.', ); } if (browserId > numberOfBrowsers) { throw new SilentError( 'EmberExam: You must specify replayBrowser value smaller than a number of browsers in the specified json file.', ); } } validateElementsUnique(replayBrowser, 'replayBrowser'); } /** * Determines if the specified partitions value makes sense for a given split. 
 *
 * @private
 * @param {Array} partitions
 * @param {Number} split
 */
function validatePartitionSplit(partitions, split) {
  // `partition` is meaningless without a `split` count to partition against.
  if (!split) {
    throw new SilentError(
      'EmberExam: You must specify a `split` value in order to use `partition`.',
    );
  }

  // Partitions are one-indexed: valid values are 1..split inclusive.
  for (let i = 0; i < partitions.length; i++) {
    const partition = partitions[i];
    if (partition < 1) {
      throw new SilentError(
        'EmberExam: Split tests are one-indexed, so you must specify partition values greater than or equal to 1.',
      );
    }

    if (partition > split) {
      throw new SilentError(
        'EmberExam: You must specify `partition` values that are less than or equal to your `split` value.',
      );
    }
  }
}

/**
 * Ensures that there is no value duplicated in a given array.
 *
 * @private
 * @param {Array} arr
 * @param {String} typeOfValue - label used in the error message (e.g. 'partition')
 */
function validateElementsUnique(arr, typeOfValue) {
  // The default (lexicographic) sort is sufficient here: equal values end up
  // adjacent regardless of ordering semantics, which is all duplicate
  // detection needs. slice() keeps the caller's array unmutated.
  const sorted = arr.slice().sort();
  for (let i = 0; i < sorted.length - 1; i++) {
    if (sorted[i] === sorted[i + 1]) {
      const errorMsg = `EmberExam: You cannot specify the same ${typeOfValue} value twice. ${sorted[i]} is repeated.`;
      throw new SilentError(errorMsg);
    }
  }
}

/**
 * Performs logic related to validating command options for testing and
 * determining which functions to run on the tests.
 *
 * @class TestsOptionsValidator
 */
module.exports = class TestsOptionsValidator {
  // options: parsed command-line options; emberCliVersion: host ember-cli version string.
  constructor(options, emberCliVersion) {
    this.options = options;
    this.emberCliVersion = emberCliVersion;
  }

  /**
   * Validates the command and returns a map of the options and whether they are enabled or not.
   *
   * @public
   * @return {Object} Map of the options and whether they are enabled or not.
   */
  validateCommands() {
    const validatedOptions = new Map();

    if (this.options.writeModuleMetadataFile) {
      validatedOptions.set('writeModuleMetadataFile', true);
    }
    if (this.options.split || this.options.partition) {
      validatedOptions.set('split', this.validateSplit());
    }
    // The parallel option accepts a number, which can be 0
    if (typeof this.options.parallel !== 'undefined') {
      validatedOptions.set('parallel', this.validateParallel());
    }
    // As random option can be an empty string it should check a type of random option rather than the option is not empty.
    if (typeof this.options.random !== 'undefined') {
      validatedOptions.set('random', this.validateRandom());
    }
    if (typeof this.options.writeExecutionFile !== 'undefined') {
      validatedOptions.set(
        'writeExecutionFile',
        this.validateWriteExecutionFile(),
      );
    }
    if (this.options.loadBalance) {
      validatedOptions.set('loadBalance', this.validateLoadBalance());
    }
    if (this.options.replayExecution || this.options.replayBrowser) {
      validatedOptions.set('replayExecution', this.validateReplayExecution());
    }

    return validatedOptions;
  }

  /**
   * Determines if we should split the tests file and validates associated options
   * (`split`, `partition`).
   *
   * @return {boolean}
   */
  validateSplit() {
    const options = this.options;
    let split = options.split;

    // A split of 0 or 1 is equivalent to no split; warn and normalize to 1.
    if (typeof split !== 'undefined' && split < 2) {
      console.warn(
        'You should specify a number of files greater than 1 to split your tests across. Defaulting to 1 split which is the same as not using `split`.',
      );
      split = 1;
    }

    if (
      typeof split !== 'undefined' &&
      typeof this.options.replayBrowser !== 'undefined'
    ) {
      throw new SilentError(
        'EmberExam: You must not use the `replay-browser` option with the `split` option.',
      );
    }

    if (typeof split !== 'undefined' && this.options.replayExecution) {
      throw new SilentError(
        'EmberExam: You must not use the `replay-execution` option with the `split` option.',
      );
    }

    const partitions = options.partition;
    if (typeof partitions !== 'undefined') {
      validatePartitions(partitions, split);
    }

    return !!split;
  }

  /**
   * Determines if we should randomize the tests and validates associated options
   * (`random`).
   *
   * @return {boolean}
   */
  validateRandom() {
    // `--random` with no value arrives as the empty string, which is valid.
    return typeof this.options.random === 'string';
  }

  /**
   * Determines if a test execution json file should be written after running a test suite and validates associated
   * options (`write-execution-file`).
   *
   * @return {boolean}
   */
  validateWriteExecutionFile() {
    if (!this.options.writeExecutionFile) {
      return false;
    }

    // The execution file records per-browser module assignments, which only
    // exist when load balancing.
    if (!this.options.loadBalance) {
      throw new SilentError(
        'EmberExam: You must run test suite with the `load-balance` option in order to use the `write-execution-file` option.',
      );
    } else if (this.options.launch === 'false') {
      //When `--no-launch` option is passed, a value for launch in testem config is set to be string false.
      throw new SilentError(
        'EmberExam: You must not use no-launch with write-execution-file option.',
      );
    }
    return true;
  }

  /**
   * Determines if we should run split tests in parallel and validates associated
   * options (`parallel`).
   *
   * @return {boolean}
   */
  validateParallel() {
    const parallelValue = parseInt(this.options.parallel, 10);

    if (isNaN(parallelValue)) {
      throw new SilentError(
        `EmberExam: You must specify a Numeric value to 'parallel'. Value passed: ${this.options.parallel}`,
      );
    }
    // Normalize the option to a number for downstream consumers.
    this.options.parallel = parallelValue;

    if (typeof this.options.replayBrowser !== 'undefined') {
      throw new SilentError(
        'EmberExam: You must not use the `replay-browser` option with the `parallel` option.',
      );
    }
    if (this.options.replayExecution) {
      throw new SilentError(
        'EmberExam: You must not use the `replay-execution` option with the `parallel` option.',
      );
    }
    if (!this.options.loadBalance) {
      if (!this.options.split) {
        throw new SilentError(
          'EmberExam: You must specify the `split` option in order to run your tests in parallel.',
        );
      } else if (this.options.parallel !== 1) {
        throw new SilentError(
          'EmberExam: When used with `split` or `partition`, `parallel` does not accept a value other than 1.',
        );
      }
    }
    // NOTE(review): the check is `< 1` (so 1 is valid) but the message says
    // "greater than 1" — should probably read "greater than or equal to 1".
    if (this.options.parallel < 1) {
      throw new SilentError(
        'EmberExam: You must specify a value greater than 1 to `parallel`.',
      );
    }
    return true;
  }

  /**
   * Determines if we should run tests in load balance mode.
   * options (`load-balance`).
   *
   * @return {boolean}
   */
  validateLoadBalance() {
    // It's required to use ember-cli version 3.2.0 or greater to support the `load-balance` feature.
    const emberCliVersionRange = semver.validRange(this.emberCliVersion);
    if (semver.gtr('3.2.0', emberCliVersionRange)) {
      throw new SilentError(
        'EmberExam: You must be using ember-cli version ^3.2.0 for this feature to work properly.',
      );
    }
    if (typeof this.options.replayBrowser !== 'undefined') {
      throw new SilentError(
        'EmberExam: You must not use the `replay-browser` option with the `load-balance` option.',
      );
    }
    if (this.options.replayExecution) {
      throw new SilentError(
        'EmberExam: You must not use the `replay-execution` option with the `load-balance` option.',
      );
    }
    //When `--no-launch` option is passed, a value for launch in testem config is set to be string false.
    if (this.options.launch === 'false') {
      throw new SilentError(
        'EmberExam: You must not use `no-launch` option with the `load-balance` option.',
      );
    }
    if (!this.options.parallel) {
      throw new SilentError(
        'EmberExam: You should specify the number of browsers to load-balance against using `parallel` when using `load-balance`.',
      );
    }
    return true;
  }

  /**
   * Determines if we should replay execution for reproduction.
   * options (`replay-execution`).
   *
   * @return {boolean}
   */
  validateReplayExecution() {
    const replayBrowser = this.options.replayBrowser;
    const replayExecution = this.options.replayExecution;
    if (!replayExecution) {
      return false;
    }
    if (this.options.launch === 'false') {
      throw new SilentError(
        'EmberExam: You must not use `no-launch` option with the `replay-execution` option.',
      );
    }
    if (replayBrowser) {
      // NOTE(review): validateReplayBrowser is declared with two parameters;
      // the third argument (this.options) appears unused — confirm and drop.
      validateReplayBrowser(replayExecution, replayBrowser, this.options);
    }
    return true;
  }
};


================================================
FILE: node-tests/.eslintrc
================================================
{
  "env": {
    "mocha": true
  },
  "rules": {
    "no-var": 0
  }
}


================================================
FILE: node-tests/acceptance/exam/vite/vite-test.js
================================================
const path = require('path');
const assert = require('assert');
const { rimrafSync } = require('rimraf');

const { ROOT, execa, getNumberOfTests } = require('../../helpers.js');

const DIR = path.resolve(ROOT, 'test-apps/vite-with-compat');
const TEST_OUTPUT_DIR = 'dist';

describe('Command | exam | vite', function () {
  // Building the vite test app can be slow; allow up to 5 minutes.
  this.timeout(300000);

  before(function () {
    // Cleanup any previous runs
    rimrafSync(TEST_OUTPUT_DIR);

    // Build the app
    return execa('pnpm', ['build:tests', '--outDir', TEST_OUTPUT_DIR], {
      cwd: DIR,
    });
  });

  after(function () {
    rimrafSync(TEST_OUTPUT_DIR);
  });

  describe('without exam', function () {
    it('has passing tests with just testem', async function () {
      let result = await execa('testem', ['ci'], {
        cwd: DIR,
        env: {
          TESTEM_DIR: TEST_OUTPUT_DIR,
}, }); assert.strictEqual(getNumberOfTests(result.stdout), 6); assert.strictEqual(result.stdout.includes('Suite A'), true); assert.strictEqual(result.stdout.includes('Suite B'), true); }); }); describe('split', function () { describe('parallel', function () { it('has no shared tests between partitions', async function () { let resultA = await execa( 'ember', [ 'exam', '--test-port', '1337', '--split', '2', '--partition', '1', '--path', TEST_OUTPUT_DIR, '--parallel', ], { cwd: DIR }, ); let resultB = await execa( 'ember', [ 'exam', '--test-port', '1338', '--split', '2', '--partition', '2', '--path', TEST_OUTPUT_DIR, '--parallel', ], { cwd: DIR }, ); assert.strictEqual(getNumberOfTests(resultA.stdout), 3); assert.strictEqual(resultA.stdout.includes('Suite A'), true); assert.strictEqual(resultA.stdout.includes('Suite B'), false); assert.strictEqual(getNumberOfTests(resultB.stdout), 3); assert.strictEqual(resultB.stdout.includes('Suite B'), true); assert.strictEqual(resultB.stdout.includes('Suite A'), false); }); }); }); describe('loadBalance', function () { it('has no shared tests between partitions', async function () { let result = await execa( 'ember', [ 'exam', '--test-port', '1339', '--load-balance', '--path', TEST_OUTPUT_DIR, '--parallel', '2', ], { cwd: DIR }, ); assert.strictEqual(getNumberOfTests(result.stdout), 6); assert.strictEqual(result.stdout.includes('Suite A'), true); assert.strictEqual(result.stdout.includes('Suite B'), true); }); }); }); ================================================ FILE: node-tests/acceptance/exam-iterate-test.js ================================================ 'use strict'; const assert = require('assert'); const { rimrafSync } = require('rimraf'); const fs = require('fs-extra'); const path = require('path'); function assertExpectRejection() { assert.ok(false, 'Expected promise to reject, but it fullfilled'); } async function execa(command, args) { const { execa: originalExeca } = await import('execa'); return 
originalExeca(command, args); } describe('Acceptance | Exam Iterate Command', function () { this.timeout(300000); it('should build the app, test it a number of times, and clean it up', function () { return execa('ember', ['exam:iterate', '2']).then((child) => { const stdout = child.stdout; assert.ok( stdout.includes('Building app for test iterations.'), 'Logged building message from command', ); assert.ok( stdout.includes('Built project successfully.'), 'Built successfully according to Ember-CLI', ); assert.ok( stdout.includes('Running iteration #1.'), 'Logs first iteration', ); assert.ok( stdout.includes('Running iteration #2.'), 'Logs second iteration', ); const seedRE = /Randomizing tests with seed: (.*)/g; const firstSeed = seedRE.exec(stdout)[1]; const secondSeed = seedRE.exec(stdout)[1]; assert.ok(firstSeed, 'first seed exists'); assert.ok(secondSeed, 'second seed exists'); assert.notEqual( firstSeed, secondSeed, 'the first and second seeds are not the same', ); assert.ok( stdout.includes('Cleaning up test iterations.'), 'Logged cleaning up message from command', ); assert.throws( () => fs.accessSync('iteration-dist', fs.F_OK), 'iteration-dist is cleaned up', ); }); }); it('should test the app with additional options passed in and catch failure cases', function () { const execution = execa('ember', [ 'exam:iterate', '2', '--options', '--parallel', ]); return execution.then(assertExpectRejection, (error) => { const splitErrorRE = /You must specify the `split` option in order to run your tests in parallel./g; assert.ok( splitErrorRE.test(error.stderr), 'expected stderr to contain the appropriate error message', ); assert.strictEqual(error.exitCode, 1); assert.strictEqual(error.failed, true); assert.strictEqual(error.killed, false); }); }); describe('building', function () { const buildDir = path.join(process.cwd(), 'dist'); afterEach(() => rimrafSync(buildDir)); it('should not build the app or clean it up, but use an existing build to test', function () { 
return execa('ember', ['build']).then(() => { execa('ember', ['exam:iterate', '2', '--path', 'dist']).then( (child) => { const stdout = child.stdout; assert.ok( !stdout.includes('Building app for test iterations.'), 'No logged building message from command', ); assert.ok( !stdout.includes('Built project successfully.'), 'Not built successfully according to Ember-CLI', ); assert.ok( stdout.includes('Running iteration #1.'), 'Logs first iteration', ); assert.ok( stdout.includes('Running iteration #2.'), 'Logs second iteration', ); const seedRE = /Randomizing tests with seed: (.*)/g; const firstSeed = seedRE.exec(stdout)[1]; const secondSeed = seedRE.exec(stdout)[1]; assert.ok(firstSeed, 'first seed exists'); assert.ok(secondSeed, 'second seed exists'); assert.notEqual( firstSeed, secondSeed, 'the first and second seeds are not the same', ); assert.ok( !stdout.includes('Cleaning up test iterations.'), 'No logged cleaning up message from command', ); assert.throws( () => fs.accessSync('iteration-dist', fs.F_OK), 'iteration-dist is non-existent', ); assert.doesNotThrow( () => fs.accessSync(buildDir, fs.F_OK), 'dist is not cleaned up', ); }, ); }); }); }); describe('Exit Code', function () { const destPath = path.join( __dirname, '..', '..', 'tests', 'unit', 'failing-test.js', ); beforeEach(function () { const failingTestPath = path.join( __dirname, '..', 'fixtures', 'failure.js', ); fs.copySync(failingTestPath, destPath); }); afterEach(function () { fs.removeSync(destPath); }); it('should have an exitCode of 1 when a test fails', function () { return execa('ember', ['exam:iterate', '1']).then( assertExpectRejection, (error) => { assert.strictEqual(error.exitCode, 1); assert.strictEqual(error.killed, false); }, ); }); }); }); ================================================ FILE: node-tests/acceptance/exam-test.js ================================================ 'use strict'; const assert = require('assert'); const fixturify = require('fixturify'); const fs = 
require('fs-extra'); const path = require('path'); const { rimrafSync } = require('rimraf'); const glob = require('glob'); const { execa, getNumberOfTests } = require('./helpers'); function assertExpectRejection() { assert.ok(false, 'Expected promise to reject, but it fullfilled'); } const TOTAL_NUM_TESTS = 67; // Total Number of tests without the global 'Ember.onerror validation tests' function getTotalNumberOfTests(output) { // In ember-qunit 3.4.0, this new check was added: https://github.com/emberjs/ember-qunit/commit/a7e93c4b4b535dae62fed992b46c00b62bfc83f4 // which adds this Ember.onerror validation test. // As Ember.onerror validation test is added per browser the total number of tests executed should be the sum of TOTAL_NUM_TESTS defined and a number of browsers. const emberOnerror = output.match( /ember-qunit: Ember.onerror validation: Ember.onerror is functioning properly/g, ); return TOTAL_NUM_TESTS + (emberOnerror ? emberOnerror.length : 0); } describe('Acceptance | Exam Command', function () { this.timeout(300000); before(function () { // Cleanup any previous runs rimrafSync('acceptance-dist'); // Build the app return execa('ember', ['build', '--output-path', 'acceptance-dist']); }); after(function () { rimrafSync('acceptance-dist'); }); function assertOutput(output, text, good, bad) { good.forEach(function (partition) { assert.ok( output.includes(`${text} ${partition} `), `output has ${text} ${partition}`, ); }); (bad || []).forEach(function (partition) { assert.ok( !output.includes(`${text} ${partition} `), `output does not have ${text} ${partition}`, ); }); } function assertAllPartitions(output) { assertOutput(output, 'Exam Partition', [1, 2, 3]); assert.strictEqual( getNumberOfTests(output), getTotalNumberOfTests(output), 'ran all of the tests in the suite', ); } function assertSomePartitions(output, good, bad) { assertOutput(output, 'Exam Partition', good, bad); assert.ok( getNumberOfTests(output) < getTotalNumberOfTests(output), 'did not run all 
of the tests in the suite', ); } it('runs all tests normally', function () { return execa('ember', ['exam', '--path', 'acceptance-dist']).then( (child) => { const stdout = child.stdout; assert.ok( !stdout.includes('Exam Partition'), 'does not add any sort of partition info', ); assert.strictEqual( getNumberOfTests(stdout), getTotalNumberOfTests(stdout), 'ran all of the tests in the suite', ); }, ); }); describe('Execute tests with load() in test-helper', function () { const originalTestHelperPath = path.join( __dirname, '..', '..', 'tests', 'test-helper.js', ); const renamedOriginalTestHelperPath = path.join( __dirname, '..', '..', 'tests', 'test-helper-orig.js', ); const testHelperWithLoadPath = path.join( __dirname, '..', 'fixtures', 'test-helper-with-load.js', ); before(function () { // Use test-helper-with-load.js as the test-helper.js file fs.renameSync(originalTestHelperPath, renamedOriginalTestHelperPath); fs.copySync(testHelperWithLoadPath, originalTestHelperPath); // Build the app return execa('ember', [ 'build', '--output-path', 'acceptance-with-load-dist', ]); }); after(function () { rimrafSync('acceptance-with-load-dist'); // restore the original test-helper.js file fs.unlinkSync(originalTestHelperPath); fs.renameSync(renamedOriginalTestHelperPath, originalTestHelperPath); }); it('runs all tests normally', function () { return execa('ember', [ 'exam', '--path', 'acceptance-with-load-dist', ]).then((child) => { const stdout = child.stdout; assert.ok( !stdout.includes('Exam Partition'), 'does not add any sort of partition info', ); assert.strictEqual( getNumberOfTests(stdout), getTotalNumberOfTests(stdout), 'ran all of the tests in the suite', ); }); }); }); describe('Split', function () { it('splits the test suite but only runs the first partition', function () { return execa('ember', [ 'exam', '--split', '3', '--path', 'acceptance-dist', ]).then((child) => { assertSomePartitions(child.stdout, [1], [2, 3]); }); }); describe('Partition', function () { 
it('splits the test suite and runs a specified partition', function () { return execa('ember', [ 'exam', '--split', '3', '--partition', '2', '--path', 'acceptance-dist', ]).then((child) => { assertSomePartitions(child.stdout, [2], [1, 3]); }); }); it('splits the test suite and runs multiple specified partitions', function () { return execa('ember', [ 'exam', '--split', '3', '--partition', '1,3', '--path', 'acceptance-dist', ]).then((child) => { assertSomePartitions(child.stdout, ['1,3'], [1, 2, 3]); }); }); it('errors when running an invalid partition', function () { return execa('ember', [ 'exam', '--split', '3', '--partition', '4', '--path', 'acceptance-dist', ]).then(assertExpectRejection, (error) => { assert.ok( error.stderr.includes( 'You must specify `partition` values that are less than or equal to your `split` value.', ), ); }); }); it('errors when specifying a partition but no split count', function () { return execa('ember', [ 'exam', '--partition', '2', '--path', 'acceptance-dist', ]).then(assertExpectRejection, (error) => { assert.ok( error.stderr.includes( 'You must specify a `split` value in order to use `partition`.', ), ); }); }); }); describe('Parallel', function () { it('runs multiple partitions in parallel', function () { return execa('ember', [ 'exam', '--path', 'acceptance-dist', '--split', '3', '--parallel', ]).then((child) => { assertAllPartitions(child.stdout); }); }); it('runs multiple specified partitions in parallel', function () { return execa('ember', [ 'exam', '--split', '3', '--partition', '1,3', '--path', 'acceptance-dist', '--parallel', ]).then((child) => { assertSomePartitions(child.stdout, [1, 3], [2]); }); }); }); }); describe('Random', function () { it('runs tests with the passed in seeds', function () { return execa('ember', [ 'exam', '--random', '1337', '--path', 'acceptance-dist', ]).then((child) => { const stdout = child.stdout; assert.ok( stdout.includes('Randomizing tests with seed: 1337'), 'logged the seed value', ); 
assert.strictEqual( getNumberOfTests(stdout), getTotalNumberOfTests(stdout), 'ran all of the tests in the suite', ); }); }); }); describe('Load Balance', function () { const unlinkFiles = []; function assertTestExecutionFailedBrowsers(output, numberOfFailedBrowsers) { const testExecutionPath = path.join( process.cwd(), output.match(/test-execution-([0-9]*).json/g)[0], ); unlinkFiles.push(testExecutionPath); assert.ok( fs.existsSync(testExecutionPath), 'test execution json written to root', ); const testExecutionFile = fs.readJsonSync(testExecutionPath); assert.strictEqual( testExecutionFile.failedBrowsers.length, numberOfFailedBrowsers, 'failed browsers array is correctly recorded', ); } function assertModuleDetailJson(output) { let moduleRunDetailJsonPath = path.join( process.cwd(), output.match(/module-metadata-([0-9]*).json/g)[0], ); unlinkFiles.push(moduleRunDetailJsonPath); assert.ok( fs.existsSync(moduleRunDetailJsonPath), 'module run detail json written to root', ); } afterEach(() => { unlinkFiles.forEach((path) => { fs.unlinkSync(path); }); unlinkFiles.length = 0; }); it('errors if `--parallel` is not passed', function () { return execa('ember', [ 'exam', '--path', 'acceptance-dist', '--load-balance', ]).then(assertExpectRejection, (error) => { assert.ok( error.stderr.includes( 'You should specify the number of browsers to load-balance against using `parallel` when using `load-balance`.', ), ); }); }); it('load balances the test suite with one browser', function () { return execa('ember', [ 'exam', '--path', 'acceptance-dist', '--write-execution-file', '--load-balance', '--parallel', ]).then((child) => { const output = child.stdout; assertTestExecutionFailedBrowsers(output, 0); assertOutput(output, 'Browser Id', [1]); assert.strictEqual( getNumberOfTests(output), getTotalNumberOfTests(output), 'ran all of the tests in the suite', ); }); }); it('should write module detail json after execution with `write-module-metadata-file`.', function () { return 
execa('ember', [ 'exam', '--path', 'acceptance-dist', '--load-balance', '--write-module-metadata-file', '--parallel', ]).then((child) => { const output = child.stdout; assertModuleDetailJson(output); }); }); it('load balances the test suite with 3 browsers', function () { return execa('ember', [ 'exam', '--path', 'acceptance-dist', '--load-balance', '--parallel', '3', '--write-execution-file', ]).then((child) => { const output = child.stdout; assertTestExecutionFailedBrowsers(output, 0); assertOutput(output, 'Browser Id', [1, 2, 3]); assert.strictEqual( getNumberOfTests(output), getTotalNumberOfTests(output), 'ran all of the tests in the suite', ); }); }); it("load balances partition 1's test suite with 3 browsers", function () { return execa('ember', [ 'exam', '--path', 'acceptance-dist', '--load-balance', '--split', '2', '--partition', '1', '--parallel', '3', '--write-execution-file', ]).then((child) => { const output = child.stdout; assertTestExecutionFailedBrowsers(output, 0); assertOutput(output, 'Exam Partition', [1], [2]); assertOutput(output, 'Browser Id', [1, 2, 3]); assert.ok( getNumberOfTests(output) < getTotalNumberOfTests(output), 'did not run all of the tests in the suite', ); }); }); describe('Failure Cases', function () { const destPath = path.join( __dirname, '..', '..', 'tests', 'unit', 'browser-exit-test.js', ); beforeEach(function () { const failingTestPath = path.join( __dirname, '..', 'fixtures', 'browser-exit.js', ); fs.copySync(failingTestPath, destPath); return execa('ember', ['build', '--output-path', 'failure-dist']); }); afterEach(function () { rimrafSync('failure-dist'); fs.removeSync(destPath); }); it('should write test-execution json when browser exits', function () { return execa('ember', [ 'exam', '--path', 'failure-dist', '--load-balance', '--parallel', '3', '--write-execution-file', ]).then(assertExpectRejection, (error) => { const output = error.stdout; assert.ok( output.includes( 'Error: Browser exited on request from test 
driver', ), `browser exited during the test execution:\n${output}`, ); assertTestExecutionFailedBrowsers(output, 1); }); }); it('should write module metadata json when browser exits', function () { return execa('ember', [ 'exam', '--path', 'failure-dist', '--load-balance', '--parallel', '2', '--write-module-metadata-file', ]).then(assertExpectRejection, (error) => { const output = error.stdout; assert.ok( output.includes( 'Error: Browser exited on request from test driver', ), `browser exited during the test execution:\n${output}`, ); assertModuleDetailJson(output); }); }); }); }); describe('Replay Execution', function () { let testExecutionJson = {}; beforeEach(() => { testExecutionJson = { numberOfBrowsers: 2, failedBrowsers: [], executionMapping: { 1: [ 'dummy/tests/unit/test-loader-test', 'dummy/tests/unit/multiple-edge-cases-test', 'dummy/tests/unit/multiple-ember-tests-test', ], 2: [ 'dummy/tests/unit/multiple-tests-test', 'dummy/tests/unit/testem-output-test', 'dummy/tests/unit/weight-test-modules-test', 'dummy/tests/unit/async-iterator-test', 'dummy/tests/unit/filter-test-modules-test', ], }, }; }); afterEach(() => { fs.unlinkSync(path.join(process.cwd(), 'test-execution-123.json')); glob.sync('module-metadata-*.json').forEach((file) => { fs.unlinkSync(path.join(process.cwd(), file)); }); }); it('replay only the failed browsers defined in failedBrowsers array', function () { testExecutionJson.failedBrowsers.push('1'); fixturify.writeSync(process.cwd(), { 'test-execution-123.json': JSON.stringify(testExecutionJson), }); return execa('ember', [ 'exam', '--replay-execution', 'test-execution-123.json', '--path', 'acceptance-dist', ]).then((child) => { const output = child.stdout; assert.strictEqual( output.match(/test-execution-([0-9]*).json/g), null, 'no test execution json should be written', ); assertOutput(output, 'Browser Id', [1]); assert.strictEqual( getNumberOfTests(output), 25, 'ran all of the tests for browser one', ); }); }); it('replay the full 
execution if failedBrowsers is empty', function () { fixturify.writeSync(process.cwd(), { 'test-execution-123.json': JSON.stringify(testExecutionJson), }); return execa('ember', [ 'exam', '--replay-execution', 'test-execution-123.json', '--path', 'acceptance-dist', ]).then((child) => { const output = child.stdout; assert.strictEqual( output.match(/test-execution-([0-9]*).json/g), null, 'no test execution json should be written', ); assertOutput(output, 'Browser Id', [1, 2]); assert.strictEqual( getNumberOfTests(output), getTotalNumberOfTests(output), 'ran all of the tests in the suite', ); }); }); it('replay only the specified execution by --replay-browser', function () { fixturify.writeSync(process.cwd(), { 'test-execution-123.json': JSON.stringify(testExecutionJson), }); return execa('ember', [ 'exam', '--replay-execution', 'test-execution-123.json', '--replay-browser', '2', '--path', 'acceptance-dist', ]).then((child) => { const output = child.stdout; assert.strictEqual( output.match(/test-execution-([0-9]*).json/g), null, 'no test execution json should be written', ); assertOutput(output, 'Browser Id', ['2']); assert.strictEqual( getNumberOfTests(output), 44, 'ran all of the tests for browser two', ); }); }); }); }); ================================================ FILE: node-tests/acceptance/helpers.js ================================================ const path = require('path'); const fsExtra = require('fs-extra'); async function execa(command, args, options) { const { execa: originalExeca } = await import('execa'); return originalExeca(command, args, options); } function getNumberOfTests(str) { const match = str.match(/# tests ([0-9]+)/); return match && parseInt(match[1], 10); } const ROOT = path.resolve(__dirname, '../../'); const FIXTURE_DIR = path.resolve(ROOT, 'node-tests/fixtures'); function applyFixture({ fixture, to }) { fsExtra.copySync(path.join(FIXTURE_DIR, fixture), to); } module.exports = { execa, getNumberOfTests, applyFixture, ROOT }; 
================================================
FILE: node-tests/fixtures/browser-exit.js
================================================
import { module, test } from 'qunit';

module('Module With Infinite Loop');

// Fixture: hangs on purpose so the acceptance suite can exercise
// ember-exam's handling of a browser that never finishes its tests.
test('Infinite loop test #1', function (assert) {
  assert.expect(1);
  let condition = true;
  while (condition) {
    condition = condition || true;
  }
  assert.ok(true);
});

================================================
FILE: node-tests/fixtures/failure.js
================================================
// Fixture: throws at module-evaluation time to simulate a broken test file.
throw 'failure';

================================================
FILE: node-tests/fixtures/test-helper-with-load.js
================================================
import Application from 'dummy/app';
import config from 'dummy/config/environment';
import { setApplication } from '@ember/test-helpers';
import loadEmberExam from 'ember-exam/test-support/load';
import { start, setupEmberOnerrorValidation } from 'ember-qunit';
import { loadTests } from 'ember-qunit/test-loader';

// Fixture test-helper: wires ember-exam in via the `load` entry point
// before the eager ember-qunit loadTests()/start() sequence.
setApplication(Application.create(config.APP));
setupEmberOnerrorValidation();
loadEmberExam();
loadTests();
start();

================================================
FILE: node-tests/fixtures/vite-eager-test-load.html
================================================
ViteWithCompat Tests
{{content-for "head"}}
{{content-for "test-head"}}
{{content-for "head-footer"}}
{{content-for "test-head-footer"}}
{{content-for "body"}}
{{content-for "test-body"}}
{{content-for "body-footer"}}

================================================
FILE: node-tests/list.mjs
================================================
/**
 * This file is used by CI to list all the test files we have to generate a matrix to run.
 * Since our node-tests dirty the working directory, we don't want to have different test files stepping on each other.
 *
 * Additionally, this allows us to run each test file in parallel with the other test files.
 *
 * The code here was copied from https://github.com/embroider-build/try/blob/main/cli.js
 * (ish)
 */
import assert from "node:assert";

// We run this file on node 24
// eslint-disable-next-line n/no-unsupported-features/node-builtins
import { glob } from "node:fs/promises";

// Unit tests are fast enough to run as a single matrix entry.
let files = [
  {
    name: "Unit",
    command: "pnpm mocha 'node-tests/unit/**/*-test.js'",
  },
];

// Each acceptance test file becomes its own matrix entry so they run in
// isolation (they mutate the working directory).
for await (const entry of glob(
  "node-tests/acceptance/**/*-test.{js,ts,mjs,cjs,mts,cts}",
)) {
  let name = entry.replace("node-tests/acceptance/", "");
  files.push({
    name,
    include: {
      name,
      command: `pnpm mocha ${entry}`,
    },
  });
}

assert(
  files.length > 0,
  `There were no found test files -- this is unexpected`,
);

process.stdout.write(
  JSON.stringify({
    name: files.map((s) => s.name),
    // always include an empty env by default, so that it's convenient to pass
    // `${{ matrix.env }}` in github actions
    include: files.map((s) => ({ env: {}, ...s.include })),
  }),
);

================================================
FILE: node-tests/unit/commands/exam-test.js
================================================
'use strict';

const assert = require('assert');
const MockProject = require('ember-cli/tests/helpers/mock-project');
const Task = require('ember-cli/lib/models/task');
const RSVP = require('rsvp');
const sinon = require('sinon');

const ExamCommand = require('../../../lib/commands/exam');

describe('ExamCommand', function () {
  // Builds an ExamCommand wired to stub Build/Test tasks and a minimal
  // mock project so run() can execute without a real ember-cli app.
  function createCommand() {
    const tasks = {
      Build: Task.extend(),
      Test: Task.extend(),
    };

    const project = new MockProject();

    project.isEmberCLIProject = function () {
      return true;
    };
    project.pkg = {
      devDependencies: {
        'ember-cli': '3.7.0',
      },
    };

    return new ExamCommand({
      project: project,
      tasks: tasks,
      ui: {
        writeLine: function () {},
      },
    });
  }

  describe('run', function () {
    let command;
    let called;

    beforeEach(function () {
      command = createCommand();
      called = {};

      // Record the options each task receives instead of really running it.
      command.tasks.Test.prototype.run = function (options) {
        called.testRun = true;
        called.testRunOptions = options;
        return RSVP.resolve();
      };

      command.tasks.Build.prototype.run = function () {
        called.buildRun = true;
        return RSVP.resolve();
      };
    });

    it('should defer to super with normal build task', function () {
      return command.run({}).then(function () {
        assert.strictEqual(called.testRun, true);
        assert.strictEqual(called.buildRun, true);
      });
    });

    it('should set `modulePath` in the query option', function () {
      return command.run({ modulePath: 'foo' }).then(function () {
        assert.strictEqual(called.testRunOptions.query, 'modulePath=foo');
      });
    });

    it('should set `partition` in the query option with one partition', function () {
      return command.run({ split: 2, partition: [2] }).then(function () {
        assert.strictEqual(called.testRunOptions.query, 'split=2&partition=2');
      });
    });

    it('should set `load-balance` in the query option', function () {
      return command.run({ loadBalance: true, parallel: 1 }).then(function () {
        assert.strictEqual(called.testRunOptions.query, 'loadBalance');
      });
    });

    it('should set `preserve-test-name` in the query option', function () {
      return command.run({ preserveTestName: true }).then(function () {
        assert.strictEqual(called.testRunOptions.query, 'preserveTestName');
      });
    });

    it('should set `partition` in the query option with multiple partitions', function () {
      return command.run({ split: 2, partition: [1, 2] }).then(function () {
        assert.strictEqual(
          called.testRunOptions.query,
          'split=2&partition=1&partition=2',
        );
      });
    });

    it('should append `partition` to the query option', function () {
      return command
        .run({ split: 2, partition: [2], query: 'someQuery=derp&hidepassed' })
        .then(function () {
          assert.strictEqual(
            called.testRunOptions.query,
            'someQuery=derp&hidepassed&split=2&partition=2',
          );
        });
    });

    it('should not append `partition` to the query option when parallelizing', function () {
      return command
        .run({ split: 2, partition: [1, 2], parallel: 1 })
        .then(function () {
          assert.strictEqual(called.testRunOptions.query, 'split=2');
        });
    });

    it('should not append `partition` to the query option when not parallelizing without partitions', function () {
      return command.run({ split: 2 }).then(function () {
        assert.strictEqual(called.testRunOptions.query, 'split=2');
      });
    });

    it('should set `seed=1337` in the query option', function () {
      return command.run({ random: '1337' }).then(function () {
        assert.strictEqual(called.testRunOptions.query, 'seed=1337');
      });
    });

    it('should append `seed=1337` to the query option', function () {
      return command
        .run({ random: '1337', query: 'someQuery=derp&hidepassed' })
        .then(function () {
          assert.strictEqual(
            called.testRunOptions.query,
            'someQuery=derp&hidepassed&seed=1337',
          );
        });
    });

    it('should set `seed=random_seed` in the query option', function () {
      // NOTE(review): the stub returns ' random_seed' (leading space) while
      // the assertion expects 'seed=random_seed' — presumably the command
      // strips the first character(s) of Math.random()'s string form;
      // confirm against lib/commands/exam.js before changing.
      const randomStub = sinon.stub(Math, 'random').returns(' random_seed');

      return command.run({ random: '' }).then(function () {
        assert.strictEqual(called.testRunOptions.query, 'seed=random_seed');
        randomStub.restore();
      });
    });

    it('should set split env var', function () {
      return command.run({ split: 5 }).then(function () {
        assert.strictEqual(process.env.EMBER_EXAM_SPLIT_COUNT, '5');
      });
    });
  });
});

================================================
FILE: node-tests/unit/utils/config-reader-test.js
================================================
'use strict';

const assert = require('assert');
const fixturify = require('fixturify');
const fs = require('fs-extra');
const path = require('path');

const readTestemConfig = require('../../../lib/utils/config-reader');

const fixturifyDir = 'tmp/fixture';
describe('ConfigReader | readTestemConfig', function () { beforeEach(function () { fs.mkdirpSync(fixturifyDir); this.fixturifyContent = { foo: 'bar', }; }); afterEach(function () { fs.removeSync(fixturifyDir); }); it('should find `testem.js` file by default and return `true` when no file name and no potential files specified', function () { fixturify.writeSync(fixturifyDir, {}); assert.ok(readTestemConfig()); }); it("should return `false` if file doesn't exsit when potential files are empty list", function () { assert.ok(!readTestemConfig('this-file-do-not-exsit.json', [])); }); it("should find `testem.js` file by default and return `true` when file specified doesn't exist", function () { assert.ok(readTestemConfig('this-file-do-not-exsit.json')); }); it('should require a specified `js` file and return an object in the module when no potential files specified', function () { assert.deepEqual(readTestemConfig('testem.simple-test-page.js').foo, 'bar'); }); it('should require a specified `js` file and return an object in the module when the file exsits and potential files are empty list', function () { assert.deepEqual( readTestemConfig('testem.simple-test-page.js', []).foo, 'bar', ); }); it('should read a specified `json` file and return an object read from the file', function () { fixturify.writeSync(fixturifyDir, { 'testem.json-file.json': JSON.stringify(this.fixturifyContent), }); assert.deepEqual( readTestemConfig(path.join(fixturifyDir, 'testem.json-file.json'), []) .foo, 'bar', ); }); it('should read a specified `yaml` file and return an object read from the file', function () { fixturify.writeSync(fixturifyDir, { 'testem.yaml-file.yaml': JSON.stringify(this.fixturifyContent), }); assert.deepEqual( readTestemConfig(path.join(fixturifyDir, 'testem.yaml-file.yaml'), []) .foo, 'bar', ); }); }); ================================================ FILE: node-tests/unit/utils/execution-state-manager-test.js ================================================ 'use strict'; 
const assert = require('assert'); const ExecutionStateManager = require('../../../lib/utils/execution-state-manager'); describe('ExecutionStateManager', function () { beforeEach(function () { this.stateManager = new ExecutionStateManager(); this.moduleQueue = ['foo', 'bar', 'baz', 'boo', 'far', 'faz']; }); describe('initializeStates', function () { it('initialize states', function () { assert.deepEqual(this.stateManager.getModuleMap().size, 0); assert.deepEqual(this.stateManager.getTestModuleQueue(), null); assert.deepEqual(this.stateManager.getReplayExecutionModuleQueue(), null); }); }); describe('moduleQueue', function () { it('is shared when no browserId passed to setModuleQueue', function () { this.stateManager.setTestModuleQueue(this.moduleQueue); assert.deepEqual( this.stateManager.getTestModuleQueue(), this.moduleQueue, 'the correct moduleQueue was returned', ); }); it('returns the next module from the shared moduleQueue and state is preserved', function () { this.stateManager.setTestModuleQueue(this.moduleQueue); assert.strictEqual( this.stateManager.getNextModuleTestModuleQueue(), 'foo', 'correctly returns the next module', ); assert.deepEqual( this.stateManager.getTestModuleQueue(), ['bar', 'baz', 'boo', 'far', 'faz'], 'the moduleQueue state was updated', ); }); it('get next module returns null if shared moduleQueue is not set', function () { assert.strictEqual( this.stateManager.getNextModuleTestModuleQueue(), null, 'returns null when moduleQueue has not been set', ); }); it('had different queue set when when browserId is specified', function () { const anotherQueue = ['1', '2', '3', '4']; this.stateManager.setReplayExecutionModuleQueue(this.moduleQueue, 1); this.stateManager.setReplayExecutionModuleQueue(anotherQueue, 2); assert.deepEqual( this.stateManager.getReplayExecutionModuleQueue(1), this.moduleQueue, ); assert.deepEqual( this.stateManager.getReplayExecutionModuleQueue(2), anotherQueue, ); }); it('returns the next module from the browser specific 
moduleQueue and state is preserved', function () { this.stateManager.setReplayExecutionModuleQueue(this.moduleQueue, 1); assert.strictEqual( this.stateManager.getNextModuleReplayExecutionModuleQueue(1), 'foo', 'correctly returns the next module', ); assert.deepEqual( this.stateManager.getReplayExecutionModuleQueue(1), ['bar', 'baz', 'boo', 'far', 'faz'], 'the moduleQueue state was updated', ); }); it('get next module returns null if browser moduleQueue is not set', function () { assert.strictEqual( this.stateManager.getNextModuleReplayExecutionModuleQueue(1), null, 'returns null when moduleQueue has not been set', ); }); }); describe('completedBrowsers', function () { it('incrementCompletedBrowsers called for the same browserId will only be accounted once', function () { this.stateManager.incrementCompletedBrowsers(1); this.stateManager.incrementCompletedBrowsers(1); assert.deepEqual(this.stateManager.getCompletedBrowser(), 1); }); }); describe('moduleRunDetails', function () { it('returns a map size of 0', function () { assert.strictEqual(this.stateManager.getModuleMetadata().size, 0); }); it('adds a single testDone module metadata to moduleMetadata.', function () { const testModuleName = 'foo'; const moduleMetadata = { moduleName: testModuleName, testName: 'testing foo', passed: 1, failed: 0, skipped: false, total: 1, duration: 1, }; this.stateManager.addToModuleMetadata(moduleMetadata); const fooModuleMetadata = this.stateManager .getModuleMetadata() .get(testModuleName); assert.strictEqual(fooModuleMetadata.passed, 1); assert.strictEqual(fooModuleMetadata.failed, 0); assert.strictEqual(fooModuleMetadata.skipped, 0); assert.strictEqual(fooModuleMetadata.duration, 1); assert.strictEqual(fooModuleMetadata.failedTests.length, 0); }); it('adds two test metadata and returns cumulative module data', function () { const fooTestModule = 'foo'; const fooTestMetadata = { moduleName: fooTestModule, testName: 'testing foo', passed: 1, failed: 0, skipped: false, total: 1, 
duration: 1, }; const barTestMetadata = { moduleName: fooTestModule, testName: 'testing bar', passed: 0, failed: 1, skipped: false, total: 1, duration: 1.8, }; this.stateManager.addToModuleMetadata(fooTestMetadata); this.stateManager.addToModuleMetadata(barTestMetadata); const fooModuleMetadata = this.stateManager .getModuleMetadata() .get(fooTestModule); assert.strictEqual(fooModuleMetadata.total, 2); assert.strictEqual(fooModuleMetadata.passed, 1); assert.strictEqual(fooModuleMetadata.failed, 1); assert.strictEqual(fooModuleMetadata.skipped, 0); assert.strictEqual(fooModuleMetadata.duration, 2.8); assert.strictEqual(fooModuleMetadata.failedTests.length, 1); }); }); }); ================================================ FILE: node-tests/unit/utils/query-helper-test.js ================================================ 'use strict'; const assert = require('assert'); const { addToQuery, addToUrl } = require('../../../lib/utils/query-helper'); describe('QueryHelper', function () { describe('addToQuery', function () { it('should add param when no query and value is true', function () { const validQuery = addToQuery(null, 'foo', true); assert.deepEqual(validQuery, 'foo'); }); it('should add param and value when no query and value is string', function () { const validQuery = addToQuery(null, 'foo', 'bar'); assert.deepEqual(validQuery, 'foo=bar'); }); it('should add param to query when value is boolean', function () { const validQuery = addToQuery('foo', 'bar', true); assert.deepEqual(validQuery, 'foo&bar'); }); it('should add param and value to query when value is string', function () { const validQuery = addToQuery('foo', 'bar', 'baz'); assert.deepEqual(validQuery, 'foo&bar=baz'); }); it('should not add param when value is false', function () { const validQuery = addToQuery('foo', 'bar', false); assert.deepEqual(validQuery, 'foo'); }); }); describe('addToUrl', function () { it('should add param to url when value is true', function () { const url = 
addToUrl('tests/index.html?hidepassed', 'foo', true); assert.deepEqual(url, 'tests/index.html?hidepassed&foo'); }); it('should not add param to url when value is false', function () { const url = addToUrl('tests/index.html?hidepassed', 'foo', false); assert.deepEqual(url, 'tests/index.html?hidepassed'); }); it('should add param and value to url when value is string', function () { const url = addToUrl('tests/index.html?hidepassed', 'foo', 'bar'); assert.deepEqual(url, 'tests/index.html?hidepassed&foo=bar'); }); }); }); ================================================ FILE: node-tests/unit/utils/test-page-helper-test.js ================================================ 'use strict'; const assert = require('assert'); const sinon = require('sinon'); const { combineOptionValueIntoArray, getBrowserId, getCustomBaseUrl, getMultipleTestPages, getTestUrlFromTestemConfig, } = require('../../../lib/utils/test-page-helper'); describe('TestPageHelper', function () { describe('combineOptionValueIntoArray', function () { it('should return empty array when no optionValue specified', function () { assert.deepEqual(combineOptionValueIntoArray(), []); }); it('should have a specified option number when the option is number', function () { assert.deepEqual(combineOptionValueIntoArray(3), [3]); }); it('should have a number of array when a specified option is string', function () { assert.deepEqual(combineOptionValueIntoArray('2,3'), [2, 3]); }); it('should have a numbe of array when a specified option is a combination of number and string ', function () { assert.deepEqual(combineOptionValueIntoArray([1, '2,3']), [1, 2, 3]); }); it('should have a sequence number of array when a specified option is in range', function () { assert.deepEqual(combineOptionValueIntoArray('1..5'), [1, 2, 3, 4, 5]); }); it('should have a number of array when a specified option is a combination of number and string in range', function () { assert.deepEqual( combineOptionValueIntoArray([1, '3..6']), [1, 3, 4, 5, 
6],
      );
    });
  });

  describe('getBrowserId', function () {
    it('should return the correct browserId', function () {
      const launcher = {
        settings: {
          test_page: 'loadBalance&browser=1',
        },
      };
      assert.strictEqual(getBrowserId(launcher), '1');
    });

    it('should throw an error if the launcher does not have test page set', function () {
      // NOTE(review): despite the description, this asserts a warning plus a
      // fallback id of 0 rather than a thrown error.
      const warnStub = sinon.stub(console, 'warn');
      const launcher = {
        foo: 'bar',
      };
      assert.strictEqual(getBrowserId(launcher), 0);
      sinon.assert.calledOnce(warnStub);
      sinon.assert.calledWithMatch(warnStub, /Launcher Settings:/);
      warnStub.restore();
    });
  });

  describe('getTestUrlFromTestemConfig', function () {
    it('should have a default test page with no config file', function () {
      const testPage = getTestUrlFromTestemConfig('');
      assert.deepEqual(testPage, 'tests/index.html?hidepassed');
    });

    it('should have a default test page with no test-page specified in a testem config file', function () {
      const warnStub = sinon.stub(console, 'warn');
      const testPage = getTestUrlFromTestemConfig('testem.no-test-page.js');
      assert.deepEqual(testPage, 'tests/index.html?hidepassed');
      sinon.assert.calledOnce(warnStub);
      sinon.assert.calledWithExactly(
        warnStub,
        'No test_page value found in the config. Defaulting to "tests/index.html?hidepassed"',
      );
      warnStub.restore();
    });

    it('should have multiple test pages specified in testem config file with test-page specified in the file', function () {
      const testPages = getTestUrlFromTestemConfig(
        'testem.multiple-test-page.js',
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&derp=herp',
        'tests/index.html?hidepassed&foo=bar',
      ]);
    });
  });

  describe('getCustomBaseUrl', function () {
    it('should add `split` when `split` option is used', function () {
      const appendedUrl = getCustomBaseUrl(
        { split: 3 },
        'tests/index.html?hidepassed',
      );
      assert.deepEqual(appendedUrl, 'tests/index.html?hidepassed&split=3');
    });

    it('should add `split` when `split` and `parallel` option are used', function () {
      // `parallel` alone adds nothing to the base url; partitions are
      // appended per test page elsewhere.
      const appendedUrl = getCustomBaseUrl(
        { split: 5, parallel: true },
        'tests/index.html?hidepassed',
      );
      assert.deepEqual(appendedUrl, 'tests/index.html?hidepassed&split=5');
    });

    it('should add `loadBalance` when `load-balance` option is used', function () {
      const appendedUrl = getCustomBaseUrl(
        { loadBalance: 2 },
        'tests/index.html?hidepassed',
      );
      assert.deepEqual(appendedUrl, 'tests/index.html?hidepassed&loadBalance');
    });

    it('should add `split`, `loadBalance`, and `partition` when `split`, `loadBalance`, and `partition` are used.', function () {
      const appendedUrl = getCustomBaseUrl(
        { split: 5, partition: [1, 2, 3], loadBalance: 2 },
        'tests/index.html?hidepassed',
      );
      assert.deepEqual(
        appendedUrl,
        'tests/index.html?hidepassed&split=5&loadBalance&partition=1&partition=2&partition=3',
      );
    });

    it('should add `loadBalance` when `replay-execution` and `replay-browser` are used', function () {
      // Replay mode reuses the load-balance pipeline, hence the flag.
      const appendedUrl = getCustomBaseUrl(
        {
          replayExecution: 'test-execution-0000000.json',
          replayBrowser: [1, 2],
        },
        'tests/index.html?hidepassed',
      );
      assert.deepEqual(appendedUrl, 'tests/index.html?hidepassed&loadBalance');
    });

    it('should add `split` to multiple test pages when `split` option is used', function () {
      const appendedUrl = getCustomBaseUrl({ split: 3 }, [
        'tests/index.html?hidepassed&derp=herp',
        'tests/index.html?hidepassed&foo=bar',
      ]);
      assert.deepEqual(appendedUrl, [
        'tests/index.html?hidepassed&derp=herp&split=3',
        'tests/index.html?hidepassed&foo=bar&split=3',
      ]);
    });

    it('should add `split` when `split` to multiple test pages and `parallel` option are used', function () {
      const appendedUrl = getCustomBaseUrl({ split: 5, parallel: true }, [
        'tests/index.html?hidepassed&derp=herp',
        'tests/index.html?hidepassed&foo=bar',
      ]);
      assert.deepEqual(appendedUrl, [
        'tests/index.html?hidepassed&derp=herp&split=5',
        'tests/index.html?hidepassed&foo=bar&split=5',
      ]);
    });

    it('should add `loadBalance` to multiple test pages when `load-balance` option is used', function () {
      const appendedUrl = getCustomBaseUrl({ loadBalance: 2 }, [
        'tests/index.html?hidepassed&derp=herp',
        'tests/index.html?hidepassed&foo=bar',
      ]);
      assert.deepEqual(appendedUrl, [
        'tests/index.html?hidepassed&derp=herp&loadBalance',
        'tests/index.html?hidepassed&foo=bar&loadBalance',
      ]);
    });

    it('should add `split`, `loadBalance`, and `partition` to multiple test pages when `split`, `loadBalance`, and `partition` are used.', function () {
      const appendedUrl = getCustomBaseUrl(
        { split: 5, partition: [1, 2, 3], loadBalance: 2 },
        [
          'tests/index.html?hidepassed&derp=herp',
          'tests/index.html?hidepassed&foo=bar',
        ],
      );
      assert.deepEqual(appendedUrl, [
        'tests/index.html?hidepassed&derp=herp&split=5&loadBalance&partition=1&partition=2&partition=3',
        'tests/index.html?hidepassed&foo=bar&split=5&loadBalance&partition=1&partition=2&partition=3',
      ]);
    });

    it('should add `loadBalance` to multiple test pages when `replay-execution` and `replay-browser` are used', function () {
      const appendedUrl = getCustomBaseUrl(
        {
          replayExecution: 'test-execution-0000000.json',
          replayBrowser: [1, 2],
        },
        [
          'tests/index.html?hidepassed&derp=herp',
          'tests/index.html?hidepassed&foo=bar',
        ],
      );
      assert.deepEqual(appendedUrl, [
        'tests/index.html?hidepassed&derp=herp&loadBalance',
        'tests/index.html?hidepassed&foo=bar&loadBalance',
      ]);
    });
  });

  describe('getMultipleTestPages', function () {
    it('should have multiple test pages with no partitions specified', function () {
      const testPages = getMultipleTestPages(
        { testPage: 'tests/index.html?hidepassed' },
        { parallel: 1, split: 2 },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&split=2&partition=1',
        'tests/index.html?hidepassed&split=2&partition=2',
      ]);
    });

    it('should have multiple test pages with specified partitions', function () {
      const testPages = getMultipleTestPages(
        { testPage: 'tests/index.html?hidepassed' },
        { parallel: 1, split: 4, partition: [3, 4] },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&split=4&partition=3',
        'tests/index.html?hidepassed&split=4&partition=4',
      ]);
    });

    it('should have multiple test pages for each test_page in the config file with no partitions specified', function () {
      const testPages = getMultipleTestPages(
        { configFile: 'testem.multiple-test-page.js' },
        { parallel: 1, split: 2 },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&derp=herp&split=2&partition=1',
        'tests/index.html?hidepassed&derp=herp&split=2&partition=2',
        'tests/index.html?hidepassed&foo=bar&split=2&partition=1',
        'tests/index.html?hidepassed&foo=bar&split=2&partition=2',
      ]);
    });

    it('should have multiple test pages for each test_page in the config file with partitions specified', function () {
      const testPages = getMultipleTestPages(
        { configFile: 'testem.multiple-test-page.js' },
        { parallel: 1, split: 4, partition: [3, 4] },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&derp=herp&split=4&partition=3',
        'tests/index.html?hidepassed&derp=herp&split=4&partition=4',
        'tests/index.html?hidepassed&foo=bar&split=4&partition=3',
        'tests/index.html?hidepassed&foo=bar&split=4&partition=4',
      ]);
    });

    it('should have a test page with `loadBalance` when no specified number of browser', function () {
      const testPages = getMultipleTestPages(
        { testPage: 'tests/index.html?hidepassed' },
        { loadBalance: true, parallel: 1 },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&loadBalance&browser=1',
      ]);
    });

    it('should have multiple test page with `loadBalance` with splitting when no specified number of browser', function () {
      const testPages = getMultipleTestPages(
        { testPage: 'tests/index.html?hidepassed' },
        { loadBalance: true, parallel: 1, split: 2 },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&split=2&loadBalance&browser=1',
      ]);
    });

    it('should have multiple test pages with test loading balanced, no specified partitions and no splitting', function () {
      const testPages = getMultipleTestPages(
        { testPage: 'tests/index.html?hidepassed' },
        { loadBalance: true, parallel: 2 },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&loadBalance&browser=1',
        'tests/index.html?hidepassed&loadBalance&browser=2',
      ]);
    });

    it('should have multiple test pages with test loading balanced, no specified partitions and no splitting', function () {
      const testPages = getMultipleTestPages(
        { testPage: 'tests/index.html?hidepassed' },
        { loadBalance: true, parallel: 2, split: 3, partition: [2, 3] },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&split=3&loadBalance&partition=2&partition=3&browser=1',
        'tests/index.html?hidepassed&split=3&loadBalance&partition=2&partition=3&browser=2',
      ]);
    });

    it('should have multiple test pages for each test_page in the config file with partitions specified and test loading balanced', function () {
      const testPages = getMultipleTestPages(
        { configFile: 'testem.multiple-test-page.js' },
        { loadBalance: true, parallel: 1, split: 4, partition: [3, 4] },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&derp=herp&split=4&loadBalance&partition=3&partition=4&browser=1',
        'tests/index.html?hidepassed&foo=bar&split=4&loadBalance&partition=3&partition=4&browser=1',
      ]);
    });

    it('should have multiple test pages
with test replay execution', function () {
      const testPages = getMultipleTestPages(
        { testPage: 'tests/index.html?hidepassed' },
        { replayExecution: 'abc.json', replayBrowser: [2] },
      );
      assert.deepEqual(testPages, [
        'tests/index.html?hidepassed&loadBalance&browser=2',
      ]);
    });
  });
});

================================================
FILE: node-tests/unit/utils/testem-events-test.js
================================================
'use strict';

const assert = require('assert');
const fixturify = require('fixturify');
const fs = require('fs-extra');
const path = require('path');

const TestemEvents = require('../../../lib/utils/testem-events');

const fixtureDir = 'tmp/fixture';
const testExecutionJsonPath = path.join(fixtureDir, 'test-execution-123.json');
// Minimal execution map: one browser that ran two modules.
const testExecutionJson = {
  numberOfBrowsers: 1,
  failedBrowsers: [],
  executionMapping: {
    1: ['path/to/testmodule', 'path/to/another/testmodule'],
  },
};

describe('TestemEvents', function () {
  beforeEach(function () {
    fs.mkdirpSync(fixtureDir);
    this.testemEvents = new TestemEvents(fixtureDir);
    this.moduleQueue = ['foo', 'bar', 'baz', 'boo', 'far', 'faz'];
  });

  afterEach(function () {
    fs.removeSync(fixtureDir);
  });

  describe('setModuleQueue', function () {
    beforeEach(function () {
      fixturify.writeSync(fixtureDir, {
        'test-execution-123.json': JSON.stringify(testExecutionJson),
      });
    });

    it('set TestModuleQueue for load-balance mode', function () {
      this.testemEvents.setModuleQueue(1, this.moduleQueue, true, false);

      assert.deepEqual(
        this.testemEvents.stateManager.getTestModuleQueue(),
        this.moduleQueue,
      );
    });

    it('ignore subsequent setModuleQueue if moduleQueue is already set for load-balance mode', function () {
      const anotherModuleQueue = ['a', 'b', 'c'];
      this.testemEvents.setModuleQueue('1', this.moduleQueue, true, false);
      this.testemEvents.setModuleQueue('2', anotherModuleQueue, true, false);

      // First writer wins; the shared queue is only initialized once.
      assert.deepEqual(
        this.testemEvents.stateManager.getTestModuleQueue(),
        this.moduleQueue,
      );
    });

    it('set replayExecutionModuleQueue for replay-execution mode', function () {
      this.testemEvents.setReplayExecutionMap(testExecutionJsonPath, ['1']);
      this.testemEvents.setModuleQueue('1', this.moduleQueue, false, true);

      // In replay mode the queue comes from the execution map on disk,
      // not from the browser's freshly loaded module list.
      assert.deepEqual(
        this.testemEvents.stateManager.getReplayExecutionModuleQueue('1'),
        testExecutionJson.executionMapping['1'],
      );
    });

    it('set replayExecutionModuleQueue for replay-execution mode when replay-browser is undefined', function () {
      this.testemEvents.setReplayExecutionMap(testExecutionJsonPath);
      this.testemEvents.setModuleQueue('1', this.moduleQueue, false, true);

      assert.deepEqual(
        this.testemEvents.stateManager.getReplayExecutionModuleQueue('1'),
        testExecutionJson.executionMapping['1'],
      );
    });

    it('throws error if ReplayExecutionMap is not set when setting replayExecutionModuleQueue for replay-execution mode', function () {
      assert.throws(
        () =>
          this.testemEvents.setModuleQueue(1, this.moduleQueue, false, true),
        /No replay execution map was set on the stateManager/,
        'Error is thrown',
      );
    });
  });

  describe('nextModuleResponse', function () {
    // Fake socket that just records emitted events and payloads.
    const socket = {
      events: [],
      emit: function (event, payload) {
        this.events.push(event);
        if (payload) {
          this.events.push(payload);
        }
      },
      reset: function () {
        this.events.length = 0;
      },
    };
    const fooResponse = {
      done: false,
      value: 'foo',
    };
    const emptyMap = new Map();

    afterEach(function () {
      socket.reset();
    });

    it('should fire next-module-response event and save the moduleName to stateManager.moduleMap when write-execution-file is true', function () {
      this.testemEvents.stateManager.setTestModuleQueue(this.moduleQueue);
      this.testemEvents.nextModuleResponse(1, socket, true);

      assert.deepEqual(
        socket.events,
        ['testem:next-module-response', fooResponse],
        'testem:next-module-response event was emitted with payload foo',
      );
      assert.deepEqual(
        this.testemEvents.stateManager.getModuleMap().values().next().value,
        ['foo'],
        'module was correctly saved to the moduleMap',
      );
    });

    it('should not save the moduleName to stateManager.moduleMap when write-execution-file is false', function () {
      this.testemEvents.stateManager.setReplayExecutionModuleQueue([], 1);
      this.testemEvents.nextModuleResponse(1, socket, false);

      assert.deepEqual(
        this.testemEvents.stateManager.getModuleMap(),
        emptyMap,
        'moduleMap should be in its initial state',
      );
    });

    it('should throw error if no moduleQueues were set', function () {
      assert.throws(
        () => this.testemEvents.nextModuleResponse(1, socket, false, 'dev'),
        /No moduleQueue was set/,
        'No moduleQueue error was thrown',
      );
    });
  });

  describe('recordFailedBrowserId', function () {
    const launcher = {
      settings: {
        test_page: 'browser=1',
      },
    };

    it('record new browserId if test failed', function () {
      this.testemEvents.recordFailedBrowserId(launcher, {});

      assert.deepEqual(
        this.testemEvents.stateManager.getFailedBrowsers(),
        [1],
        'failed browserId 1 is correctly recorded',
      );
    });

    it('does not record browserId that has already been recorded', function () {
      this.testemEvents.recordFailedBrowserId(launcher, {});
      this.testemEvents.recordFailedBrowserId(launcher, {});

      assert.deepEqual(
        this.testemEvents.stateManager.getFailedBrowsers(),
        [1],
        'failed browserId 1 is correctly recorded only once',
      );
    });
  });

  describe('completedBrowsersHandler', function () {
    const mockUi = {
      writeLine: () => {},
    };

    it('should increment completedBrowsers only when completedBrowsers is less than browserCount', function () {
      this.testemEvents.completedBrowsersHandler(
        2,
        1,
        mockUi,
        new Map([
          ['loadBalance', true],
          ['writeExecutionFile', false],
        ]),
        '0000',
      );

      assert.strictEqual(
        this.testemEvents.stateManager.getCompletedBrowser(),
        1,
        'completedBrowsers was incremented',
      );
    });

    it('should write test-execution file and cleanup state when completedBrowsers equals browserCount and load-balance is true', function () {
      this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('a', 1);
      this.testemEvents.stateManager.addToStartedLaunchers(1010);
      this.testemEvents.completedBrowsersHandler(
        1,
        1,
        mockUi,
        new Map([
['loadBalance', true], ['writeExecutionFile', true], ]), '0000', ); const actual = fs.readFileSync( path.join(fixtureDir, 'test-execution-0000.json'), ); assert.deepEqual(JSON.parse(actual), { numberOfBrowsers: 1, failedBrowsers: [], executionMapping: { 1: ['a'], }, }); }); it('should write module-run-details file and cleanup state when completedBrowsers equals browserCount, load-balance is true, and write-execution-file is false', function () { this.testemEvents.stateManager.addToModuleMetadata({ moduleName: 'a', testName: 'test', passed: true, failed: false, skipped: false, duration: 1, }); this.testemEvents.stateManager.addToStartedLaunchers(1010); this.testemEvents.completedBrowsersHandler( 1, 1, mockUi, new Map([ ['loadBalance', true], ['writeModuleMetadataFile', true], ]), '0000', ); const actual = fs.readFileSync( path.join(fixtureDir, 'module-metadata-0000.json'), ); assert.deepEqual(JSON.parse(actual).modules, [ { moduleName: 'a', total: 1, passed: 1, failed: 0, skipped: 0, duration: 1, failedTests: [], }, ]); }); it('should write module-run-details file with sorted by duration', function () { this.testemEvents.stateManager.addToModuleMetadata({ moduleName: 'a', testName: 'test 1', passed: true, failed: false, skipped: false, duration: 1, }); this.testemEvents.stateManager.addToModuleMetadata({ moduleName: 'a', testName: 'test 2', passed: false, failed: true, skipped: false, duration: 8, }); this.testemEvents.stateManager.addToModuleMetadata({ moduleName: 'b', testName: 'test 1', passed: true, failed: false, skipped: false, duration: 1, }); this.testemEvents.stateManager.addToStartedLaunchers(1010); this.testemEvents.completedBrowsersHandler( 1, 1, mockUi, new Map([ ['loadBalance', true], ['writeModuleMetadataFile', true], ]), '0000', ); const actual = fs.readFileSync( path.join(fixtureDir, 'module-metadata-0000.json'), ); assert.deepEqual(JSON.parse(actual).modules, [ { moduleName: 'a', total: 2, passed: 1, failed: 1, skipped: 0, duration: 9, failedTests: 
['test 2'], }, { moduleName: 'b', total: 1, passed: 1, failed: 0, skipped: 0, duration: 1, failedTests: [], }, ]); }); it('should add skipped test number to write module-metadata-file with sorted by duration', function () { this.testemEvents.stateManager.addToModuleMetadata({ moduleName: 'a', testName: 'test 1', passed: true, failed: false, skipped: true, duration: 0, }); this.testemEvents.stateManager.addToModuleMetadata({ moduleName: 'a', testName: 'test 2', passed: false, failed: true, skipped: false, duration: 8, }); this.testemEvents.stateManager.addToModuleMetadata({ moduleName: 'b', testName: 'test 1', passed: true, failed: false, skipped: false, duration: 1, }); this.testemEvents.stateManager.addToModuleMetadata({ moduleName: 'b', testName: 'test 1', passed: true, failed: false, skipped: false, duration: 0, }); this.testemEvents.stateManager.addToModuleMetadata({ moduleName: 'b', testName: 'test 1', paseed: true, failed: false, skipped: true, duration: 1, }); this.testemEvents.stateManager.addToStartedLaunchers(1010); this.testemEvents.completedBrowsersHandler( 1, 1, mockUi, new Map([ ['loadBalance', true], ['writeModuleMetadataFile', true], ]), '0000', ); const actual = fs.readFileSync( path.join(fixtureDir, 'module-metadata-0000.json'), ); assert.deepEqual(JSON.parse(actual).modules, [ { moduleName: 'a', total: 2, passed: 0, failed: 1, skipped: 1, duration: 8, failedTests: ['test 2'], }, { moduleName: 'b', total: 3, passed: 2, failed: 0, skipped: 1, duration: 2, failedTests: [], }, ]); }); it('should increment completedBrowsers when load-balance is false', function () { this.testemEvents.completedBrowsersHandler( 2, 1, mockUi, new Map([ ['loadBalance', false], ['writeExecutionFile', false], ]), '0000', ); assert.strictEqual( this.testemEvents.stateManager.getCompletedBrowser(), 1, 'completedBrowsers was incremented', ); }); it('should not clean up states from stateManager when test execution is not completed', function () { 
this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('a', 1); this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('b', 2); this.testemEvents.completedBrowsersHandler( 2, 1011, mockUi, new Map([['loadBalance', true]]), '0000', ); assert.deepEqual(this.testemEvents.stateManager.getModuleMap().size, 2); }); it('should clean up states from stateManager when test execution is completed', function () { const mockReplayExecutionMap = { 1: ['a'] }; this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('a', 1); this.testemEvents.stateManager.setReplayExecutionMap( mockReplayExecutionMap, ); this.testemEvents.stateManager.addToStartedLaunchers(1010); this.testemEvents.completedBrowsersHandler( 1, 1010, mockUi, new Map([['loadBalance', true]]), '0000', ); assert.deepEqual(this.testemEvents.stateManager.getModuleMap().size, 0); assert.deepEqual( this.testemEvents.stateManager.getTestModuleQueue(), null, ); assert.deepEqual( this.testemEvents.stateManager.getReplayExecutionModuleQueue(), null, ); assert.deepEqual( this.testemEvents.stateManager.getReplayExecutionMap(), mockReplayExecutionMap, ); }); it('should clean up states from stateManager when all launched browsers complete tests', function () { const mockReplayExecutionMap = { 1: ['a'] }; this.testemEvents.stateManager.addModuleNameToReplayExecutionMap('a', 1); this.testemEvents.stateManager.setReplayExecutionMap( mockReplayExecutionMap, ); this.testemEvents.stateManager.addToStartedLaunchers(1010); this.testemEvents.completedBrowsersHandler( 1, 1010, mockUi, new Map([['loadBalance', true]]), '0000', ); assert.deepEqual(this.testemEvents.stateManager.getModuleMap().size, 0); assert.deepEqual( this.testemEvents.stateManager.getTestModuleQueue(), null, ); assert.deepEqual( this.testemEvents.stateManager.getReplayExecutionModuleQueue(), null, ); assert.deepEqual( this.testemEvents.stateManager.getReplayExecutionMap(), mockReplayExecutionMap, ); }); it('should clean up states from 
stateManager when all launched browsers exited', function () { this.testemEvents.stateManager.setTestModuleQueue([]); this.testemEvents.stateManager.addToStartedLaunchers(1010); this.testemEvents.stateManager.addToStartedLaunchers(1011); this.testemEvents.completedBrowsersHandler( 2, 1010, mockUi, new Map([['loadBalance', true]]), '0000', ); assert.deepEqual(this.testemEvents.stateManager.getModuleMap().size, 0); assert.deepEqual(this.testemEvents.stateManager.getTestModuleQueue(), []); this.testemEvents.completedBrowsersHandler( 2, 1011, mockUi, new Map([['loadBalance', true]]), '0000', ); assert.deepEqual( this.testemEvents.stateManager.getTestModuleQueue(), null, ); }); }); }); ================================================ FILE: node-tests/unit/utils/tests-options-validator-test.js ================================================ 'use strict'; const assert = require('assert'); const fixturify = require('fixturify'); const fs = require('fs-extra'); const TestOptionsValidator = require('../../../lib/utils/tests-options-validator'); const TestExecutionJson = { numberOfBrowsers: 2, browserToModuleMap: { 1: ['/tests/integration/components/my-component-test'], 2: ['/tests/integration/components/navigating-component-test'], }, }; describe('TestOptionsValidator', function () { function validateCommand(validator, cmd) { switch (cmd) { case 'Split': return validator.validateSplit(); case 'Random': return validator.validateRandom(); case 'Parallel': return validator.validateParallel(); case 'writeExecutionFile': return validator.validateWriteExecutionFile(); case 'LoadBalance': return validator.validateLoadBalance(); case 'ReplayExecution': return validator.validateReplayExecution(); default: throw new Error('invalid command passed'); } } function shouldThrow(cmd, options, message, emberCliVer = '3.7.0') { const validator = new TestOptionsValidator(options, emberCliVer); assert.throws(() => validateCommand(validator, cmd), message); } function shouldEqual(cmd, options, 
value, emberCliVer = '3.7.0') { const validator = new TestOptionsValidator(options, emberCliVer); assert.strictEqual(validateCommand(validator, cmd), value); } function shouldWarn(cmd, options, value, emberCliVer = '3.7.0') { let originalWarn = console.warn; let warnCalled = 0; let warnMessage = ''; console.warn = function (message) { warnCalled++; warnMessage = message; }; const validator = new TestOptionsValidator(options, emberCliVer); assert.notEqual(validateCommand(validator, cmd), undefined); assert.strictEqual(warnCalled, 1); assert.strictEqual(warnMessage, value); console.warn = originalWarn; } describe('shouldSplit', function () { function shouldSplitThrows(options, message) { shouldThrow('Split', options, message); } function shouldSplitEqual(options, message) { shouldEqual('Split', options, message); } it('should log a warning if `split` is less than 2', function () { shouldWarn( 'Split', { split: 1 }, 'You should specify a number of files greater than 1 to split your tests across. Defaulting to 1 split which is the same as not using `split`.', ); }); it('should throw an error if `partition` is used without `split`', function () { shouldSplitThrows( { partition: [1] }, /You must specify a `split` value in order to use `partition`/, ); }); it('should throw an error if `partition` contains a value less than 1', function () { shouldSplitThrows( { split: 2, partition: [1, 0] }, /Split tests are one-indexed, so you must specify partition values greater than or equal to 1./, ); }); it('should throw an error if `partition` contains a value greater than `split`', function () { shouldSplitThrows( { split: 2, partition: [1, 3] }, /You must specify `partition` values that are less than or equal to your `split` value./, ); }); it('should throw an error if `partition` contains duplicate values', function () { shouldSplitThrows( { split: 2, partition: [1, 2, 1] }, /You cannot specify the same partition value twice. 
1 is repeated./, ); }); it('should return true if using `split`', function () { shouldSplitEqual({ split: 2 }, true); }); it('should return true if using `split` and `partition`', function () { shouldSplitEqual({ split: 2, partition: [1] }, true); }); it('should return false if not using `split`', function () { shouldSplitEqual({}, false); }); }); describe('shouldRandomize', function () { function shouldRandomizeEqual(options, message) { shouldEqual('Random', options, message); } it('should return true if `random` is an empty string', function () { shouldRandomizeEqual({ random: '' }, true); }); it('should return true if `random` is set to a string', function () { shouldRandomizeEqual({ random: '1337' }, true); }); it('should return false if `random` is a non-string', function () { shouldRandomizeEqual({ random: true }, false); }); it('should return false if `random` is not used', function () { shouldRandomizeEqual({}, false); }); }); describe('shouldParallelize', function () { it('should throw an error if `parallel` is not a numeric value', function () { shouldThrow( 'Parallel', { parallel: '--reporter' }, /EmberExam: You must specify a Numeric value to 'parallel'. 
Value passed: --reporter/, ); }); it('should throw an error if `split` is not being used', function () { shouldThrow( 'Parallel', { parallel: 1 }, /You must specify the `split` option in order to run your tests in parallel/, ); }); it('should throw an error if used with `replay-execution`', function () { shouldThrow( 'Parallel', { replayExecution: 'abc', parallel: 1 }, /You must not use the `replay-execution` option with the `parallel` option./, ); }); it('should throw an error if used with `replay-browser`', function () { shouldThrow( 'Parallel', { replayBrowser: 2, parallel: 1 }, /You must not use the `replay-browser` option with the `parallel` option./, ); }); it('should throw an error if parallel is > 1 when used with `split`', function () { shouldThrow( 'Parallel', { split: 2, parallel: 2 }, /When used with `split` or `partition`, `parallel` does not accept a value other than 1./, ); }); it('should throw an error if 0 is passed while loadBalance is specified', function () { shouldThrow( 'Parallel', { loadBalance: 2, parallel: 0 }, /You must specify a value greater than 1 to `parallel`./, ); }); it('should return true', function () { shouldEqual('Parallel', { split: 2, parallel: 1 }, true); }); }); describe('ShouldWriteExecutionFile', function () { it('should return false when not passed', function () { shouldEqual( 'writeExecutionFile', { loadBalance: true, parallel: 2, launch: 'false', }, false, ); }); it('should throw an error if `write-execution-file` is used without `load-balance`', function () { shouldThrow( 'writeExecutionFile', { writeExecutionFile: true }, /You must run test suite with the `load-balance` option in order to use the `write-execution-file` option./, ); }); it('should throw an error if `write-execution-file` is used without `load-balance`', function () { shouldThrow( 'writeExecutionFile', { split: 2, partition: 1, writeExecutionFile: true, }, /You must run test suite with the `load-balance` option in order to use the `write-execution-file` 
option./, ); }); it('should throw an error if `write-execution-file` is used without `load-balance`', function () { shouldThrow( 'writeExecutionFile', { replayExecution: 'test-execution-0000000.json', replayBrowser: [1, 2], writeExecutionFile: true, }, /You must run test suite with the `load-balance` option in order to use the `write-execution-file` option./, ); }); it('should throw an error if `write-execution-file` is used with `no-launch`', function () { shouldThrow( 'writeExecutionFile', { loadBalance: true, parallel: 1, launch: 'false', writeExecutionFile: true, }, /You must not use no-launch with write-execution-file option./, ); }); it('should return true', function () { shouldEqual( 'writeExecutionFile', { loadBalance: true, parallel: 2, writeExecutionFile: true, }, true, ); }); }); describe('shouldLoadBalance', function () { it('should throw an error if ember-cli version is below 3.2.0', function () { shouldThrow( 'LoadBalance', { loadBalance: true, replayExecution: 'abc' }, /You must be using ember-cli version \^3.2.0 for this feature to work properly./, '3.0.0', ); }); it('should throw an error if ember-cli version is ~3.1.0', function () { shouldThrow( 'LoadBalance', { loadBalance: true, replayExecution: 'abc' }, /You must be using ember-cli version \^3.2.0 for this feature to work properly./, '~3.1.0', ); }); it('should throw an error if `replayExecution` is passed', function () { shouldThrow( 'LoadBalance', { loadBalance: true, replayExecution: 'abc' }, /You must not use the `replay-execution` option with the `load-balance` option./, ); }); it('should throw an error if `replayBrowser` is passed', function () { shouldThrow( 'LoadBalance', { loadBalance: true, replayBrowser: 3 }, /You must not use the `replay-browser` option with the `load-balance` option./, ); }); it('should throw an error if `parallel` is not defined', function () { shouldThrow( 'LoadBalance', { loadBalance: true }, /You should specify the number of browsers to load-balance against 
using `parallel` when using `load-balance`./, ); }); it('should throw an error if `parallel` has a value less than 1', function () { shouldThrow( 'LoadBalance', { loadBalance: true, parallel: 0 }, /You should specify the number of browsers to load-balance against using `parallel` when using `load-balance`./, ); }); it('should throw an error if `no-launch` is passed', function () { shouldThrow( 'LoadBalance', { loadBalance: true, parallel: 0, launch: 'false' }, /You must not use `no-launch` option with the `load-balance` option./, ); }); it('should return true', function () { shouldEqual('LoadBalance', { loadBalance: true, parallel: 3 }, true); }); }); describe('shouldReplayExecution', function () { before(function () { fixturify.writeSync(process.cwd(), { 'test-execution-0000000.json': JSON.stringify(TestExecutionJson), }); }); after(function () { fs.unlink('test-execution-0000000.json'); }); it('should throw an error if `replay-browser` contains a value less than 1', function () { shouldThrow( 'ReplayExecution', { replayExecution: 'test-execution-0000000.json', replayBrowser: [1, 0], }, /You must specify replay-browser values greater than or equal to 1./, ); }); it('should throw an error if `replay-browser` contains duplicate values', function () { shouldThrow( 'ReplayExecution', { replayExecution: 'test-execution-0000000.json', replayBrowser: [1, 2, 1], }, /You cannot specify the same replayBrowser value twice. 
1 is repeated./, ); }); it('should throw an error if `replay-browser` contains an invalid browser number', function () { shouldThrow( 'ReplayExecution', { replayExecution: 'test-execution-0000000.json', replayBrowser: [3, 1], }, /You must specify replayBrowser value smaller than a number of browsers in the specified json file./, ); }); it('should throw an error if `no-launch` is used with `replay-execution`.', function () { shouldThrow( 'ReplayExecution', { replayExecution: 'test-execution-0000000.json', launch: 'false', }, /You must not use `no-launch` option with the `replay-execution` option./, ); }); it('should return true', function () { shouldEqual( 'ReplayExecution', { replayExecution: 'test-execution-0000000.json', replayBrowser: [1, 2], }, true, ); }); }); }); ================================================ FILE: package.json ================================================ { "name": "ember-exam", "version": "10.1.0", "description": "Run your tests with randomization, splitting, and parallelization for beautiful tests.", "keywords": [ "ember-addon" ], "homepage": "https://ember-cli.github.io/ember-exam", "repository": { "type": "git", "url": "https://github.com/ember-cli/ember-exam.git" }, "license": "MIT", "author": "", "directories": { "doc": "doc", "test": "tests" }, "scripts": { "build": "ember build --environment=production", "coverage": "nyc report --reporter=text-lcov | codeclimate-test-reporter", "format": "prettier . --write", "lint": "concurrently \"npm:lint:*(!fix)\" --names \"lint:\"", "lint:format": "prettier . --cache --check", "lint:js": "eslint . 
&& pnpm --filter './test-apps/**' lint:js", "start": "ember serve", "test": "concurrently \"npm:test:*\"", "test:ember": "ember test", "test:ember-compatibility": "ember try:each", "test:node": "nyc mocha 'node-tests/**/*-test.js'" }, "nyc": { "exclude": [ "config", "node-tests", "tests" ] }, "dependencies": { "@babel/core": "^7.28.0", "chalk": "^5.3.0", "cli-table3": "^0.6.0", "debug": "^4.2.0", "ember-auto-import": "^2.7.0", "ember-cli-babel": "^8.2.0", "execa": "^8.0.1", "fs-extra": "^11.2.0", "js-yaml": "^4.0.0", "npmlog": "^7.0.0", "rimraf": "^5.0.0", "semver": "^7.3.2", "silent-error": "^1.1.1" }, "devDependencies": { "@babel/plugin-proposal-decorators": "7.28.6", "@ember/optional-features": "2.3.0", "@ember/string": "3.1.1", "@ember/test-helpers": "3.3.1", "@embroider/test-setup": "3.0.3", "auto-dist-tag": "2.1.1", "codeclimate-test-reporter": "0.5.1", "concurrently": "9.2.1", "ember-cli": "5.12.0", "ember-cli-dependency-checker": "3.3.3", "ember-cli-htmlbars": "6.3.0", "ember-cli-inject-live-reload": "2.1.0", "ember-concurrency": "4.0.6", "ember-eslint": "0.6.1", "ember-load-initializers": "2.1.2", "ember-qunit": "9.0.4", "ember-resolver": "11.0.1", "ember-source": "6.10.1", "ember-source-channel-url": "3.0.0", "ember-try": "3.0.0", "eslint": "9.39.4", "fixturify": "3.0.0", "glob": "11.1.0", "globals": "16.5.0", "loader.js": "4.7.0", "mocha": "11.7.5", "nyc": "17.1.0", "prettier": "3.8.1", "prettier-plugin-ember-template-tag": "2.1.3", "qunit": "2.25.0", "release-plan": "^0.17.4", "rsvp": "4.8.5", "sinon": "21.0.3", "typescript": "5.9.3", "webpack": "5.104.1" }, "peerDependencies": { "ember-qunit": "*", "ember-source": ">= 4.0.0", "qunit": "*" }, "packageManager": "pnpm@10.33.0", "engines": { "node": ">= 18" }, "volta": { "node": "18.20.8" }, "publishConfig": { "registry": "https://registry.npmjs.org" }, "pnpm": { "overrides": { "@glimmer/syntax": "^0.95.0", "ember-exam": "workspace:*" } }, "ember": { "edition": "octane" }, "ember-addon": { "configPath": 
"tests/dummy/config" } } ================================================ FILE: pnpm-workspace.yaml ================================================ packages: - . - ./docs-app - test-apps/* ## We do not want to auto-install peers because ## We want to ensure we understand what is actually required ## So that if a consuming app uses strict mode things will work ## ## This said, Apps should probably set this to true autoInstallPeers: false ## We use so many similarly grouped peers, we want to make the ## peer-groups easier to distinguish. ## This forces a shorter sha for all groups (vs the default of 1000) peersSuffixMaxLength: 40 virtualStoreDirMaxLength: 40 ## If a dependency is not declared, we do not want to accidentally ## resolve it from the workspace root. This is a common source of ## bugs in monorepos. resolvePeersFromWorkspaceRoot: false ## This also means we do not want to hoist them to the root ## As this would both expose them to all other packages AND ## results in them using symlinks instead of hardlinks hoistWorkspacePackages: false ## Our Workspace Packages are "injected" so prevent ## devDependencies from being exposed and to allow ## for us to test optional peerDependencies. injectWorkspacePackages: true ## Update injected dependencies when needed. ## Unfortunately, this does not run after scripts in ## the monorepo root, so we have added a special "sync" ## script to handle this. ## ## NOTE: sync always happens after install (automatically from pnpm). ## this script exists so we can manually sync injected deps if we need to syncInjectedDepsAfterScripts: - sync ## It may also be good to understand that we intentionally are ## not using `hoisting` and using `injected` workspace packages ## to ensure properly isolated dep trees for test apps. # ## things like `moduleExists` from @embroider/macros will report false answers ## for the test apps unless we avoid hoisting. 
## ## Note, if we ever need to hoist something, we can use hoist-pattern[]="" ## For instance: hoist-pattern[]=*node-fetch* ## to hoist the specific thing we need and set this to `true`. When true ## and a hoist-pattern is present only the hoist-pattern will be hoisted. hoist: false ## In keeping with our "no hoisting" and "no auto-peers" and ## "isolated dep trees", we also want to avoid other things ## that lead to reliance on hoisting. ## In general, deduping leads to hoisting. This particular ## setting causes direct-dependencies to resolve from the ## workspace root if already in root. We don't want this. dedupeDirectDeps: false ## We do not want to dedupe peer dependencies as this ## results in hoisting and violates optional peer isolation. dedupePeerDependents: false ## We do not want to dedupe injected dependencies as this ## results in hoisting and violates optional peer isolation. dedupeInjectedDeps: false overrides: # Ember-try brings in old dependencies that include a bad set of private / unpublished glimmer stuff '@glimmer/syntax': '^0.94.0' ================================================ FILE: test-apps/broccoli/.editorconfig ================================================ # EditorConfig helps developers define and maintain consistent # coding styles between different editors and IDEs # editorconfig.org root = true [*] end_of_line = lf charset = utf-8 trim_trailing_whitespace = true insert_final_newline = true indent_style = space indent_size = 2 [*.hbs] insert_final_newline = false [*.{diff,md}] trim_trailing_whitespace = false ================================================ FILE: test-apps/broccoli/.ember-cli ================================================ { /** Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript rather than JavaScript by default, when a TypeScript version of a given blueprint is available. 
*/ "isTypeScriptProject": false } ================================================ FILE: test-apps/broccoli/.github/workflows/ci.yml ================================================ name: CI on: push: branches: - main - master pull_request: {} concurrency: group: ci-${{ github.head_ref || github.ref }} cancel-in-progress: true jobs: lint: name: "Lint" runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v6 - name: Install Node uses: actions/setup-node@v6 with: node-version: 18 cache: npm - name: Install Dependencies run: npm ci - name: Lint run: npm run lint test: name: "Test" runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v6 - name: Install Node uses: actions/setup-node@v6 with: node-version: 18 cache: npm - name: Install Dependencies run: npm ci - name: Run Tests run: npm test ================================================ FILE: test-apps/broccoli/.gitignore ================================================ # compiled output /dist/ /declarations/ # dependencies /node_modules/ # misc /.env* /.pnp* /.eslintcache /coverage/ /npm-debug.log* /testem.log /yarn-error.log # ember-try /.node_modules.ember-try/ /npm-shrinkwrap.json.ember-try /package.json.ember-try /package-lock.json.ember-try /yarn.lock.ember-try # broccoli-debug /DEBUG/ ================================================ FILE: test-apps/broccoli/.prettierignore ================================================ # unconventional js /blueprints/*/files/ # compiled output /dist/ # misc /coverage/ !.* .*/ # ember-try /.node_modules.ember-try/ ================================================ FILE: test-apps/broccoli/.prettierrc.js ================================================ 'use strict'; module.exports = { overrides: [ { files: '*.{js,ts}', options: { singleQuote: true, }, }, ], }; ================================================ FILE: test-apps/broccoli/.stylelintignore ================================================ # unconventional files 
/blueprints/*/files/ # compiled output /dist/ # addons /.node_modules.ember-try/ ================================================ FILE: test-apps/broccoli/.stylelintrc.js ================================================ 'use strict'; module.exports = { extends: ['stylelint-config-standard', 'stylelint-prettier/recommended'], }; ================================================ FILE: test-apps/broccoli/.template-lintrc.js ================================================ 'use strict'; module.exports = { extends: 'recommended', }; ================================================ FILE: test-apps/broccoli/.watchmanconfig ================================================ { "ignore_dirs": ["dist"] } ================================================ FILE: test-apps/broccoli/README.md ================================================ # broccoli This README outlines the details of collaborating on this Ember application. A short introduction of this app could easily go here. ## Prerequisites You will need the following things properly installed on your computer. - [Git](https://git-scm.com/) - [Node.js](https://nodejs.org/) (with npm) - [Ember CLI](https://cli.emberjs.com/release/) - [Google Chrome](https://google.com/chrome/) ## Installation - `git clone ` this repository - `cd broccoli` - `npm install` ## Running / Development - `npm run start` - Visit your app at [http://localhost:4200](http://localhost:4200). - Visit your tests at [http://localhost:4200/tests](http://localhost:4200/tests). ### Code Generators Make use of the many generators for code, try `ember help generate` for more details ### Running Tests - `npm run test` - `npm run test:ember -- --server` ### Linting - `npm run lint` - `npm run lint:fix` ### Building - `npm exec ember build` (development) - `npm run build` (production) ### Deploying Specify what it takes to deploy your app. 
## Further Reading / Useful Links - [ember.js](https://emberjs.com/) - [ember-cli](https://cli.emberjs.com/release/) - Development Browser Extensions - [ember inspector for chrome](https://chrome.google.com/webstore/detail/ember-inspector/bmdblncegkenkacieihfhpjfppoconhi) - [ember inspector for firefox](https://addons.mozilla.org/en-US/firefox/addon/ember-inspector/) ================================================ FILE: test-apps/broccoli/app/app.js ================================================ import Application from '@ember/application'; import Resolver from 'ember-resolver'; import loadInitializers from 'ember-load-initializers'; import config from 'broccoli/config/environment'; export default class App extends Application { modulePrefix = config.modulePrefix; podModulePrefix = config.podModulePrefix; Resolver = Resolver; } loadInitializers(App, config.modulePrefix); ================================================ FILE: test-apps/broccoli/app/components/.gitkeep ================================================ ================================================ FILE: test-apps/broccoli/app/controllers/.gitkeep ================================================ ================================================ FILE: test-apps/broccoli/app/helpers/.gitkeep ================================================ ================================================ FILE: test-apps/broccoli/app/index.html ================================================ Broccoli {{content-for "head"}} {{content-for "head-footer"}} {{content-for "body"}} {{content-for "body-footer"}} ================================================ FILE: test-apps/broccoli/app/models/.gitkeep ================================================ ================================================ FILE: test-apps/broccoli/app/router.js ================================================ import EmberRouter from '@ember/routing/router'; import config from 'broccoli/config/environment'; export default class Router extends 
EmberRouter { location = config.locationType; rootURL = config.rootURL; } Router.map(function () {}); ================================================ FILE: test-apps/broccoli/app/routes/.gitkeep ================================================ ================================================ FILE: test-apps/broccoli/app/styles/app.css ================================================ /* Ember supports plain CSS out of the box. More info: https://cli.emberjs.com/release/advanced-use/stylesheets/ */ ================================================ FILE: test-apps/broccoli/app/templates/application.hbs ================================================ {{page-title "Broccoli"}} {{outlet}} {{! The following component displays Ember's default welcome message. }} {{! Feel free to remove this! }} ================================================ FILE: test-apps/broccoli/config/ember-cli-update.json ================================================ { "schemaVersion": "1.0.0", "packages": [ { "name": "ember-cli", "version": "5.12.0", "blueprints": [ { "name": "app", "outputRepo": "https://github.com/ember-cli/ember-new-output", "codemodsSource": "ember-app-codemods-manifest@1", "isBaseBlueprint": true, "options": [ "--ci-provider=github" ] } ] } ] } ================================================ FILE: test-apps/broccoli/config/environment.js ================================================ 'use strict'; module.exports = function (environment) { const ENV = { modulePrefix: 'broccoli', environment, rootURL: '/', locationType: 'history', EmberENV: { EXTEND_PROTOTYPES: false, FEATURES: { // Here you can enable experimental features on an ember canary build // e.g. 
EMBER_NATIVE_DECORATOR_SUPPORT: true }, }, APP: { // Here you can pass flags/options to your application instance // when it is created }, }; if (environment === 'development') { // ENV.APP.LOG_RESOLVER = true; // ENV.APP.LOG_ACTIVE_GENERATION = true; // ENV.APP.LOG_TRANSITIONS = true; // ENV.APP.LOG_TRANSITIONS_INTERNAL = true; // ENV.APP.LOG_VIEW_LOOKUPS = true; } if (environment === 'test') { // Testem prefers this... ENV.locationType = 'none'; // keep test console output quieter ENV.APP.LOG_ACTIVE_GENERATION = false; ENV.APP.LOG_VIEW_LOOKUPS = false; ENV.APP.rootElement = '#ember-testing'; ENV.APP.autoboot = false; } if (environment === 'production') { // here you can enable a production-specific feature } return ENV; }; ================================================ FILE: test-apps/broccoli/config/optional-features.json ================================================ { "application-template-wrapper": false, "default-async-observers": true, "jquery-integration": false, "template-only-glimmer-components": true, "no-implicit-route-model": true } ================================================ FILE: test-apps/broccoli/config/targets.js ================================================ 'use strict'; const browsers = [ 'last 1 Chrome versions', 'last 1 Firefox versions', 'last 1 Safari versions', ]; module.exports = { browsers, }; ================================================ FILE: test-apps/broccoli/ember-cli-build.js ================================================ 'use strict'; const EmberApp = require('ember-cli/lib/broccoli/ember-app'); module.exports = function (defaults) { const app = new EmberApp(defaults, { // Add options here }); return app.toTree(); }; ================================================ FILE: test-apps/broccoli/eslint.config.mjs ================================================ import { ember } from 'ember-eslint'; import * as url from 'url'; // Needed until Node 20 const dirname = url.fileURLToPath(new URL('.', import.meta.url)); 
export default [...ember.recommended(dirname)]; ================================================ FILE: test-apps/broccoli/package.json ================================================ { "name": "broccoli", "version": "0.0.0", "private": true, "description": "Small description for broccoli goes here", "repository": "", "license": "MIT", "author": "", "directories": { "doc": "doc", "test": "tests" }, "scripts": { "build": "ember build --environment=production", "lint": "concurrently \"npm:lint:*(!fix)\" --names \"lint:\"", "lint:fix": "concurrently \"npm:lint:*:fix\" --names \"fix:\"", "lint:hbs": "ember-template-lint .", "lint:hbs:fix": "ember-template-lint . --fix", "lint:js": "eslint . --cache", "lint:js:fix": "eslint . --fix", "start": "ember serve", "test": "concurrently \"npm:lint\" \"npm:test:*\" --names \"lint,test:\"", "test:ember": "ember test" }, "devDependencies": { "@babel/core": "7.28.6", "@babel/plugin-proposal-decorators": "7.28.6", "@ember/optional-features": "2.3.0", "@ember/string": "4.0.1", "@ember/test-helpers": "3.3.1", "@glimmer/component": "1.1.2", "@glimmer/tracking": "1.1.2", "broccoli-asset-rev": "3.0.0", "concurrently": "8.2.2", "ember-auto-import": "2.12.1", "ember-cli": "5.12.0", "ember-cli-app-version": "7.0.0", "ember-cli-babel": "8.2.0", "ember-cli-clean-css": "3.0.0", "ember-cli-dependency-checker": "3.3.3", "ember-cli-htmlbars": "6.3.0", "ember-cli-inject-live-reload": "2.1.0", "ember-cli-sri": "2.1.1", "ember-cli-terser": "4.0.2", "ember-eslint": "0.6.1", "ember-load-initializers": "2.1.2", "ember-modifier": "4.2.2", "ember-page-title": "8.2.4", "ember-qunit": "8.1.1", "ember-resolver": "12.0.1", "ember-source": "5.12.0", "ember-template-lint": "6.1.0", "ember-welcome-page": "7.0.2", "eslint": "9.39.4", "loader.js": "4.7.0", "prettier": "3.8.1", "qunit": "2.25.0", "qunit-dom": "3.5.0", "tracked-built-ins": "3.4.0", "webpack": "5.104.1" }, "dependencies": { "ember-exam": "workspace:*" }, "engines": { "node": ">= 18" }, "ember": { 
"edition": "octane" } } ================================================ FILE: test-apps/broccoli/public/robots.txt ================================================ # http://www.robotstxt.org User-agent: * Disallow: ================================================ FILE: test-apps/broccoli/testem.js ================================================ 'use strict'; module.exports = { test_page: 'tests/index.html?hidepassed', disable_watching: true, launch_in_ci: ['Chrome'], launch_in_dev: ['Chrome'], browser_start_timeout: 120, browser_args: { Chrome: { ci: [ // --no-sandbox is needed when running Chrome inside a container process.env.CI ? '--no-sandbox' : null, '--headless', '--disable-dev-shm-usage', '--disable-software-rasterizer', '--mute-audio', '--remote-debugging-port=0', '--window-size=1440,900', ].filter(Boolean), }, }, }; ================================================ FILE: test-apps/broccoli/tests/index.html ================================================ Broccoli Tests {{content-for "head"}} {{content-for "test-head"}} {{content-for "head-footer"}} {{content-for "test-head-footer"}} {{content-for "body"}} {{content-for "test-body"}}
{{content-for "body-footer"}} {{content-for "test-body-footer"}} ================================================ FILE: test-apps/broccoli/tests/test-helper.js ================================================ import Application from 'broccoli/app'; import config from 'broccoli/config/environment'; import * as QUnit from 'qunit'; import { setApplication } from '@ember/test-helpers'; import { setup } from 'qunit-dom'; import { start } from 'ember-qunit'; setApplication(Application.create(config.APP)); setup(QUnit.assert); start(); ================================================ FILE: test-apps/embroider3-webpack/.editorconfig ================================================ # EditorConfig helps developers define and maintain consistent # coding styles between different editors and IDEs # editorconfig.org root = true [*] end_of_line = lf charset = utf-8 trim_trailing_whitespace = true insert_final_newline = true indent_style = space indent_size = 2 [*.hbs] insert_final_newline = false [*.{diff,md}] trim_trailing_whitespace = false ================================================ FILE: test-apps/embroider3-webpack/.ember-cli ================================================ { /** Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript rather than JavaScript by default, when a TypeScript version of a given blueprint is available. */ "isTypeScriptProject": false, /** Setting `componentAuthoringFormat` to "strict" will force the blueprint generators to generate GJS or GTS files for the component and the component rendering test. "loose" is the default. */ "componentAuthoringFormat": "loose", /** Setting `routeAuthoringFormat` to "strict" will force the blueprint generators to generate GJS or GTS templates for routes. 
"loose" is the default */ "routeAuthoringFormat": "loose" } ================================================ FILE: test-apps/embroider3-webpack/.github/workflows/ci.yml ================================================ name: CI on: push: branches: - main - master pull_request: {} concurrency: group: ci-${{ github.head_ref || github.ref }} cancel-in-progress: true jobs: lint: name: "Lint" runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v6 - name: Install Node uses: actions/setup-node@v6 with: node-version: 18 cache: npm - name: Install Dependencies run: npm ci - name: Lint run: npm run lint test: name: "Test" runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v6 - name: Install Node uses: actions/setup-node@v6 with: node-version: 18 cache: npm - name: Install Dependencies run: npm ci - name: Run Tests run: npm test ================================================ FILE: test-apps/embroider3-webpack/.gitignore ================================================ # compiled output /dist/ /declarations/ # dependencies /node_modules/ # misc /.env* /.pnp* /.eslintcache /coverage/ /npm-debug.log* /testem.log /yarn-error.log # broccoli-debug /DEBUG/ ================================================ FILE: test-apps/embroider3-webpack/.prettierignore ================================================ # unconventional js /blueprints/*/files/ # compiled output /dist/ # misc /coverage/ !.* .*/ /pnpm-lock.yaml ember-cli-update.json *.html ================================================ FILE: test-apps/embroider3-webpack/.prettierrc.js ================================================ 'use strict'; module.exports = { plugins: ['prettier-plugin-ember-template-tag'], overrides: [ { files: '*.{js,gjs,ts,gts,mjs,mts,cjs,cts}', options: { singleQuote: true, templateSingleQuote: false, }, }, ], }; ================================================ FILE: test-apps/embroider3-webpack/.stylelintignore ================================================ 
# unconventional files /blueprints/*/files/ # compiled output /dist/ ================================================ FILE: test-apps/embroider3-webpack/.stylelintrc.js ================================================ 'use strict'; module.exports = { extends: ['stylelint-config-standard'], }; ================================================ FILE: test-apps/embroider3-webpack/.template-lintrc.js ================================================ 'use strict'; module.exports = { extends: 'recommended', }; ================================================ FILE: test-apps/embroider3-webpack/.watchmanconfig ================================================ { "ignore_dirs": ["dist"] } ================================================ FILE: test-apps/embroider3-webpack/README.md ================================================ # embroider3-webpack This README outlines the details of collaborating on this Ember application. A short introduction of this app could easily go here. ## Prerequisites You will need the following things properly installed on your computer. - [Git](https://git-scm.com/) - [Node.js](https://nodejs.org/) (with npm) - [Ember CLI](https://cli.emberjs.com/release/) - [Google Chrome](https://google.com/chrome/) ## Installation - `git clone ` this repository - `cd embroider3-webpack` - `npm install` ## Running / Development - `npm run start` - Visit your app at [http://localhost:4200](http://localhost:4200). - Visit your tests at [http://localhost:4200/tests](http://localhost:4200/tests). ### Code Generators Make use of the many generators for code, try `ember help generate` for more details ### Running Tests - `npm run test` - `npm run test:ember -- --server` ### Linting - `npm run lint` - `npm run lint:fix` ### Building - `npm exec ember build` (development) - `npm run build` (production) ### Deploying Specify what it takes to deploy your app. 
## Further Reading / Useful Links - [ember.js](https://emberjs.com/) - [ember-cli](https://cli.emberjs.com/release/) - Development Browser Extensions - [ember inspector for chrome](https://chrome.google.com/webstore/detail/ember-inspector/bmdblncegkenkacieihfhpjfppoconhi) - [ember inspector for firefox](https://addons.mozilla.org/en-US/firefox/addon/ember-inspector/) ================================================ FILE: test-apps/embroider3-webpack/app/app.js ================================================ import Application from '@ember/application'; import Resolver from 'ember-resolver'; import loadInitializers from 'ember-load-initializers'; import config from 'embroider3-webpack/config/environment'; import { importSync, isDevelopingApp, macroCondition } from '@embroider/macros'; if (macroCondition(isDevelopingApp())) { importSync('./deprecation-workflow'); } export default class App extends Application { modulePrefix = config.modulePrefix; podModulePrefix = config.podModulePrefix; Resolver = Resolver; } loadInitializers(App, config.modulePrefix); ================================================ FILE: test-apps/embroider3-webpack/app/components/.gitkeep ================================================ ================================================ FILE: test-apps/embroider3-webpack/app/controllers/.gitkeep ================================================ ================================================ FILE: test-apps/embroider3-webpack/app/deprecation-workflow.js ================================================ import setupDeprecationWorkflow from 'ember-cli-deprecation-workflow'; /** * Docs: https://github.com/ember-cli/ember-cli-deprecation-workflow */ setupDeprecationWorkflow({ /** false by default, but if a developer / team wants to be more aggressive about being proactive with handling their deprecations, this should be set to "true" */ throwOnUnhandled: false, workflow: [ /* ... handlers ... 
*/ /* to generate this list, run your app for a while (or run the test suite), * and then run in the browser console: * * deprecationWorkflow.flushDeprecations() * * And copy the handlers here */ /* example: */ /* { handler: 'silence', matchId: 'template-action' }, */ ], }); ================================================ FILE: test-apps/embroider3-webpack/app/helpers/.gitkeep ================================================ ================================================ FILE: test-apps/embroider3-webpack/app/index.html ================================================ Embroider3Webpack {{content-for "head"}} {{content-for "head-footer"}} {{content-for "body"}} {{content-for "body-footer"}} ================================================ FILE: test-apps/embroider3-webpack/app/models/.gitkeep ================================================ ================================================ FILE: test-apps/embroider3-webpack/app/router.js ================================================ import EmberRouter from '@ember/routing/router'; import config from 'embroider3-webpack/config/environment'; export default class Router extends EmberRouter { location = config.locationType; rootURL = config.rootURL; } Router.map(function () {}); ================================================ FILE: test-apps/embroider3-webpack/app/routes/.gitkeep ================================================ ================================================ FILE: test-apps/embroider3-webpack/app/styles/app.css ================================================ /* Ember supports plain CSS out of the box. More info: https://cli.emberjs.com/release/advanced-use/stylesheets/ */ ================================================ FILE: test-apps/embroider3-webpack/app/templates/application.hbs ================================================ {{page-title "Embroider3Webpack"}} {{outlet}} {{! The following component displays Ember's default welcome message. }} {{! Feel free to remove this! 
}} ================================================ FILE: test-apps/embroider3-webpack/config/ember-cli-update.json ================================================ { "schemaVersion": "1.0.0", "packages": [ { "name": "ember-cli", "version": "6.6.0", "blueprints": [ { "name": "app", "outputRepo": "https://github.com/ember-cli/ember-new-output", "codemodsSource": "ember-app-codemods-manifest@1", "isBaseBlueprint": true, "options": [ "--embroider", "--ci-provider=github" ] } ] } ] } ================================================ FILE: test-apps/embroider3-webpack/config/environment.js ================================================ 'use strict'; module.exports = function (environment) { const ENV = { modulePrefix: 'embroider3-webpack', environment, rootURL: '/', locationType: 'history', EmberENV: { EXTEND_PROTOTYPES: false, FEATURES: { // Here you can enable experimental features on an ember canary build // e.g. EMBER_NATIVE_DECORATOR_SUPPORT: true }, }, APP: { // Here you can pass flags/options to your application instance // when it is created }, }; if (environment === 'development') { // ENV.APP.LOG_RESOLVER = true; // ENV.APP.LOG_ACTIVE_GENERATION = true; // ENV.APP.LOG_TRANSITIONS = true; // ENV.APP.LOG_TRANSITIONS_INTERNAL = true; // ENV.APP.LOG_VIEW_LOOKUPS = true; } if (environment === 'test') { // Testem prefers this... 
ENV.locationType = 'none'; // keep test console output quieter ENV.APP.LOG_ACTIVE_GENERATION = false; ENV.APP.LOG_VIEW_LOOKUPS = false; ENV.APP.rootElement = '#ember-testing'; ENV.APP.autoboot = false; } if (environment === 'production') { // here you can enable a production-specific feature } return ENV; }; ================================================ FILE: test-apps/embroider3-webpack/config/optional-features.json ================================================ { "application-template-wrapper": false, "default-async-observers": true, "jquery-integration": false, "template-only-glimmer-components": true, "no-implicit-route-model": true } ================================================ FILE: test-apps/embroider3-webpack/config/targets.js ================================================ 'use strict'; const browsers = [ 'last 1 Chrome versions', 'last 1 Firefox versions', 'last 1 Safari versions', ]; module.exports = { browsers, }; ================================================ FILE: test-apps/embroider3-webpack/ember-cli-build.js ================================================ 'use strict'; const EmberApp = require('ember-cli/lib/broccoli/ember-app'); module.exports = function (defaults) { const app = new EmberApp(defaults, { emberData: { deprecations: { // New projects can safely leave this deprecation disabled. 
// If upgrading, to opt-into the deprecated behavior, set this to true and then follow: // https://deprecations.emberjs.com/id/ember-data-deprecate-store-extends-ember-object // before upgrading to Ember Data 6.0 DEPRECATE_STORE_EXTENDS_EMBER_OBJECT: false, }, }, // Add options here }); const { Webpack } = require('@embroider/webpack'); return require('@embroider/compat').compatBuild(app, Webpack, { staticAddonTestSupportTrees: true, staticAddonTrees: true, staticEmberSource: true, staticInvokables: true, skipBabel: [ { package: 'qunit', }, ], }); }; ================================================ FILE: test-apps/embroider3-webpack/eslint.config.mjs ================================================ import { ember } from 'ember-eslint'; import * as url from 'url'; // Needed until Node 20 const dirname = url.fileURLToPath(new URL('.', import.meta.url)); export default [...ember.recommended(dirname)]; ================================================ FILE: test-apps/embroider3-webpack/package.json ================================================ { "name": "embroider3-webpack", "version": "0.0.0", "private": true, "description": "Small description for embroider3-webpack goes here", "repository": "", "license": "MIT", "author": "", "directories": { "doc": "doc", "test": "tests" }, "scripts": { "build": "ember build --environment=production", "format": "prettier . --cache --write", "lint": "concurrently \"npm:lint:*(!fix)\" --names \"lint:\" --prefixColors auto", "lint:fix": "concurrently \"npm:lint:*:fix\" --names \"fix:\" --prefixColors auto && npm run format", "lint:format": "prettier . --cache --check", "lint:js": "eslint . --cache", "lint:js:fix": "eslint . 
--fix", "start": "ember serve", "test": "concurrently \"npm:lint\" \"npm:test:*\" --names \"lint,test:\" --prefixColors auto", "test:ember": "ember test" }, "devDependencies": { "@babel/core": "7.28.6", "@babel/plugin-proposal-decorators": "7.28.6", "@ember/optional-features": "2.3.0", "@ember/test-helpers": "5.4.1", "@embroider/compat": "3.9.3", "@embroider/core": "3.5.9", "@embroider/macros": "1.19.7", "@embroider/webpack": "4.1.2", "@glimmer/component": "2.0.0", "@glimmer/tracking": "1.1.2", "broccoli-asset-rev": "3.0.0", "concurrently": "9.2.1", "ember-auto-import": "2.12.1", "ember-cli": "6.9.1", "ember-cli-app-version": "7.0.0", "ember-cli-babel": "8.2.0", "ember-cli-clean-css": "3.0.0", "ember-cli-dependency-checker": "3.3.3", "ember-cli-deprecation-workflow": "3.4.0", "ember-cli-htmlbars": "6.3.0", "ember-cli-inject-live-reload": "2.1.0", "ember-eslint": "0.6.1", "ember-load-initializers": "3.0.1", "ember-modifier": "4.2.2", "ember-page-title": "9.0.3", "ember-qunit": "9.0.4", "ember-resolver": "13.1.1", "ember-source": "6.10.1", "ember-template-imports": "4.4.0", "ember-welcome-page": "7.0.2", "eslint": "9.39.4", "loader.js": "4.7.0", "prettier": "3.8.1", "prettier-plugin-ember-template-tag": "2.1.3", "qunit": "2.25.0", "qunit-dom": "3.5.0", "tracked-built-ins": "3.4.0", "webpack": "5.104.1" }, "dependencies": { "ember-exam": "workspace:*" }, "engines": { "node": ">= 20.11" }, "ember": { "edition": "octane" } } ================================================ FILE: test-apps/embroider3-webpack/public/robots.txt ================================================ # http://www.robotstxt.org User-agent: * Disallow: ================================================ FILE: test-apps/embroider3-webpack/testem.js ================================================ 'use strict'; module.exports = { test_page: 'tests/index.html?hidepassed', disable_watching: true, launch_in_ci: ['Chrome'], launch_in_dev: ['Chrome'], browser_start_timeout: 120, browser_args: { Chrome: { ci: 
[ // --no-sandbox is needed when running Chrome inside a container process.env.CI ? '--no-sandbox' : null, '--headless', '--disable-dev-shm-usage', '--disable-software-rasterizer', '--mute-audio', '--remote-debugging-port=0', '--window-size=1440,900', ].filter(Boolean), }, }, }; ================================================ FILE: test-apps/embroider3-webpack/tests/index.html ================================================ Embroider3Webpack Tests {{content-for "head"}} {{content-for "test-head"}} {{content-for "head-footer"}} {{content-for "test-head-footer"}} {{content-for "body"}} {{content-for "test-body"}}
{{content-for "body-footer"}} {{content-for "test-body-footer"}} ================================================ FILE: test-apps/embroider3-webpack/tests/test-helper.js ================================================ import Application from 'embroider3-webpack/app'; import config from 'embroider3-webpack/config/environment'; import { setApplication } from '@ember/test-helpers'; import { setupEmberOnerrorValidation } from 'ember-qunit'; import { start as startEmberExam } from 'ember-exam/test-support'; setApplication(Application.create(config.APP)); setupEmberOnerrorValidation(); startEmberExam(); ================================================ FILE: test-apps/vite-with-compat/.editorconfig ================================================ # EditorConfig helps developers define and maintain consistent # coding styles between different editors and IDEs # editorconfig.org root = true [*] end_of_line = lf charset = utf-8 trim_trailing_whitespace = true insert_final_newline = true indent_style = space indent_size = 2 [*.hbs] insert_final_newline = false [*.{diff,md}] trim_trailing_whitespace = false ================================================ FILE: test-apps/vite-with-compat/.ember-cli ================================================ { /** Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript rather than JavaScript by default, when a TypeScript version of a given blueprint is available. */ "isTypeScriptProject": false, /** Setting `componentAuthoringFormat` to "strict" will force the blueprint generators to generate GJS or GTS files for the component and the component rendering test. "loose" is the default. */ "componentAuthoringFormat": "strict", /** Setting `routeAuthoringFormat` to "strict" will force the blueprint generators to generate GJS or GTS templates for routes. 
"loose" is the default */ "routeAuthoringFormat": "strict" } ================================================ FILE: test-apps/vite-with-compat/.gitignore ================================================ # compiled output /dist/ /declarations/ /tmp/ # dependencies /node_modules/ # misc *.local /.pnp* /.eslintcache /coverage/ /npm-debug.log* /testem.log /yarn-error.log # ember-try /.node_modules.ember-try/ /npm-shrinkwrap.json.ember-try /package.json.ember-try /package-lock.json.ember-try /yarn.lock.ember-try # broccoli-debug /DEBUG/ ================================================ FILE: test-apps/vite-with-compat/.prettierignore ================================================ # unconventional js /blueprints/*/files/ # compiled output /dist/ # misc /coverage/ !.* .*/ /pnpm-lock.yaml ember-cli-update.json *.html ================================================ FILE: test-apps/vite-with-compat/.prettierrc.mjs ================================================ export default { plugins: ['prettier-plugin-ember-template-tag'], singleQuote: true, overrides: [ { files: ['*.js', '*.ts', '*.cjs', '.mjs', '.cts', '.mts', '.cts'], options: { trailingComma: 'es5', }, }, { files: ['*.html'], options: { singleQuote: false, }, }, { files: ['*.json'], options: { singleQuote: false, }, }, { files: ['*.hbs'], options: { singleQuote: false, }, }, { files: ['*.gjs', '*.gts'], options: { templateSingleQuote: false, trailingComma: 'es5', }, }, ], }; ================================================ FILE: test-apps/vite-with-compat/.template-lintrc.mjs ================================================ export default { extends: 'recommended', }; ================================================ FILE: test-apps/vite-with-compat/.watchmanconfig ================================================ { "ignore_dirs": ["dist"] } ================================================ FILE: test-apps/vite-with-compat/README.md ================================================ # vite-with-compat This README 
outlines the details of collaborating on this Ember application. A short introduction of this app could easily go here. ## Prerequisites You will need the following things properly installed on your computer. - [Git](https://git-scm.com/) - [Node.js](https://nodejs.org/) - [pnpm](https://pnpm.io/) - [Ember CLI](https://cli.emberjs.com/release/) - [Google Chrome](https://google.com/chrome/) ## Installation - `git clone ` this repository - `cd vite-with-compat` - `pnpm install` ## Running / Development - `pnpm start` - Visit your app at [http://localhost:4200](http://localhost:4200). - Visit your tests at [http://localhost:4200/tests](http://localhost:4200/tests). ### Code Generators Make use of the many generators for code, try `ember help generate` for more details ### Running Tests - `pnpm test` - `pnpm test --server` ### Linting - `pnpm lint` - `pnpm lint:fix` ### Building - `pnpm ember build` (development) - `pnpm build` (production) ### Deploying Specify what it takes to deploy your app. 
## Further Reading / Useful Links - [ember.js](https://emberjs.com/) - [ember-cli](https://cli.emberjs.com/release/) - Development Browser Extensions - [ember inspector for chrome](https://chrome.google.com/webstore/detail/ember-inspector/bmdblncegkenkacieihfhpjfppoconhi) - [ember inspector for firefox](https://addons.mozilla.org/en-US/firefox/addon/ember-inspector/) ================================================ FILE: test-apps/vite-with-compat/app/app.js ================================================ import Application from '@ember/application'; import compatModules from '@embroider/virtual/compat-modules'; import Resolver from 'ember-resolver'; import config from 'vite-with-compat/config/environment'; export default class App extends Application { modulePrefix = config.modulePrefix; podModulePrefix = config.podModulePrefix; Resolver = Resolver.withModules(compatModules); } ================================================ FILE: test-apps/vite-with-compat/app/config/environment.js ================================================ import loadConfigFromMeta from '@embroider/config-meta-loader'; import { assert } from '@ember/debug'; const config = loadConfigFromMeta('vite-with-compat'); assert( 'config is not an object', typeof config === 'object' && config !== null ); assert( 'modulePrefix was not detected on your config', 'modulePrefix' in config && typeof config.modulePrefix === 'string' ); assert( 'locationType was not detected on your config', 'locationType' in config && typeof config.locationType === 'string' ); assert( 'rootURL was not detected on your config', 'rootURL' in config && typeof config.rootURL === 'string' ); assert( 'APP was not detected on your config', 'APP' in config && typeof config.APP === 'object' ); export default config; ================================================ FILE: test-apps/vite-with-compat/app/router.js ================================================ import EmberRouter from '@ember/routing/router'; import config from 
'vite-with-compat/config/environment'; export default class Router extends EmberRouter { location = config.locationType; rootURL = config.rootURL; } Router.map(function () {}); ================================================ FILE: test-apps/vite-with-compat/babel.config.cjs ================================================ const { babelCompatSupport, templateCompatSupport, } = require('@embroider/compat/babel'); module.exports = { plugins: [ [ 'babel-plugin-ember-template-compilation', { compilerPath: 'ember-source/dist/ember-template-compiler.js', enableLegacyModules: [ 'ember-cli-htmlbars', 'ember-cli-htmlbars-inline-precompile', 'htmlbars-inline-precompile', ], transforms: [...templateCompatSupport()], }, ], [ 'module:decorator-transforms', { runtime: { import: require.resolve('decorator-transforms/runtime-esm'), }, }, ], [ '@babel/plugin-transform-runtime', { absoluteRuntime: __dirname, useESModules: true, regenerator: false, }, ], ...babelCompatSupport(), ], generatorOpts: { compact: false, }, }; ================================================ FILE: test-apps/vite-with-compat/config/ember-cli-update.json ================================================ { "schemaVersion": "1.0.0", "packages": [ { "name": "@ember/app-blueprint", "version": "0.8.1", "blueprints": [ { "name": "@ember/app-blueprint", "isBaseBlueprint": true, "options": [ "--pnpm", "--ci-provider=none" ] } ] } ] } ================================================ FILE: test-apps/vite-with-compat/config/environment.js ================================================ 'use strict'; module.exports = function (environment) { const ENV = { modulePrefix: 'vite-with-compat', environment, rootURL: '/', locationType: 'history', EmberENV: { EXTEND_PROTOTYPES: false, FEATURES: { // Here you can enable experimental features on an ember canary build // e.g. 
EMBER_NATIVE_DECORATOR_SUPPORT: true }, }, APP: { // Here you can pass flags/options to your application instance // when it is created }, }; if (environment === 'development') { // ENV.APP.LOG_RESOLVER = true; // ENV.APP.LOG_ACTIVE_GENERATION = true; // ENV.APP.LOG_TRANSITIONS = true; // ENV.APP.LOG_TRANSITIONS_INTERNAL = true; // ENV.APP.LOG_VIEW_LOOKUPS = true; } if (environment === 'test') { // Testem prefers this... ENV.locationType = 'none'; // keep test console output quieter ENV.APP.LOG_ACTIVE_GENERATION = false; ENV.APP.LOG_VIEW_LOOKUPS = false; ENV.APP.rootElement = '#ember-testing'; ENV.APP.autoboot = false; } if (environment === 'production') { // here you can enable a production-specific feature } return ENV; }; ================================================ FILE: test-apps/vite-with-compat/config/optional-features.json ================================================ { "application-template-wrapper": false, "default-async-observers": true, "jquery-integration": false, "template-only-glimmer-components": true, "no-implicit-route-model": true } ================================================ FILE: test-apps/vite-with-compat/config/targets.js ================================================ 'use strict'; const browsers = [ 'last 1 Chrome versions', 'last 1 Firefox versions', 'last 1 Safari versions', ]; module.exports = { browsers, }; ================================================ FILE: test-apps/vite-with-compat/ember-cli-build.js ================================================ 'use strict'; const EmberApp = require('ember-cli/lib/broccoli/ember-app'); const { compatBuild } = require('@embroider/compat'); module.exports = async function (defaults) { const { buildOnce } = await import('@embroider/vite'); let app = new EmberApp(defaults, { emberData: { deprecations: { // New projects can safely leave this deprecation disabled. 
// If upgrading, to opt into the deprecated behavior,
--fix", "sync": "echo 'pnpm will sync injected dependencies. See pnpm-workspace.yaml'", "test:normal": "testem ci", "test:ember": "pnpm build:tests && pnpm test:normal", "test:exam": "pnpm :exam --random" }, "exports": { "./tests/*": "./tests/*", "./*": "./app/*" }, "dependencies": { "ember-exam": "workspace:*" }, "devDependencies": { "@babel/core": "7.28.6", "@babel/plugin-transform-runtime": "7.28.5", "@babel/runtime": "7.28.6", "@ember/optional-features": "2.3.0", "@ember/string": "4.0.1", "@ember/test-helpers": "5.4.1", "@ember/test-waiters": "4.1.1", "@embroider/compat": "4.1.17", "@embroider/config-meta-loader": "1.0.0", "@embroider/core": "4.4.7", "@embroider/macros": "1.19.7", "@embroider/vite": "1.5.2", "@glimmer/component": "2.0.0", "@rollup/plugin-babel": "6.1.0", "babel-plugin-ember-template-compilation": "2.4.1", "concurrently": "9.2.1", "decorator-transforms": "2.3.1", "ember-auto-import": "2.12.1", "ember-cli": "6.9.1", "ember-cli-babel": "8.2.0", "ember-eslint": "0.6.1", "ember-qunit": "9.0.4", "ember-resolver": "13.1.1", "ember-source": "6.10.1", "ember-template-lint": "7.9.3", "eslint": "9.39.4", "prettier": "3.8.1", "prettier-plugin-ember-template-tag": "2.1.3", "qunit": "2.25.0", "qunit-dom": "3.5.0", "testem": "3.17.0", "vite": "7.3.2" }, "engines": { "node": ">= 18" }, "ember": { "edition": "octane" } } ================================================ FILE: test-apps/vite-with-compat/public/robots.txt ================================================ # http://www.robotstxt.org User-agent: * Disallow: ================================================ FILE: test-apps/vite-with-compat/testem.cjs ================================================ 'use strict'; if (typeof module !== 'undefined') { module.exports = { test_page: 'tests/index.html?hidepassed', cwd: process.env.TESTEM_DIR ?? 
'dist', disable_watching: true, launch_in_ci: ['Chrome'], launch_in_dev: ['Chrome'], browser_start_timeout: 120, browser_disconnect_timeout: 30, browser_args: { Chrome: { ci: [ // --no-sandbox is needed when running Chrome inside a container process.env.CI ? '--no-sandbox' : null, '--headless', '--disable-dev-shm-usage', '--disable-software-rasterizer', '--mute-audio', '--remote-debugging-port=0', '--window-size=1440,900', ].filter(Boolean), }, }, }; } ================================================ FILE: test-apps/vite-with-compat/tests/index.html ================================================ ViteWithCompat Tests {{content-for "head"}} {{content-for "test-head"}} {{content-for "head-footer"}} {{content-for "test-head-footer"}} {{content-for "body"}} {{content-for "test-body"}}
{{content-for "body-footer"}} ================================================ FILE: test-apps/vite-with-compat/tests/integration/a-test.gjs ================================================ import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { render } from '@ember/test-helpers'; console.log('Suite A is evaluated'); module('Suite A', function (hooks) { setupRenderingTest(hooks); test('a', async function (assert) { await render(); assert.dom().hasText('a'); }); test('b', async function (assert) { await new Promise(resolve => setTimeout(resolve, 2_000)); await render(); assert.dom().hasText('b'); }); test('c', async function (assert) { await render(); assert.dom().hasText('c'); }); }); ================================================ FILE: test-apps/vite-with-compat/tests/integration/b-test.gjs ================================================ import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { render } from '@ember/test-helpers'; console.log('Suite B is evaluated'); module('Suite B', function (hooks) { setupRenderingTest(hooks); test('a', async function (assert) { await new Promise((resolve) => setTimeout(resolve, 2_000)); await render(); assert.dom().hasText('a'); }); test('b', async function (assert) { await render(); assert.dom().hasText('b'); }); test('c', async function (assert) { await render(); assert.dom().hasText('c'); }); }); ================================================ FILE: test-apps/vite-with-compat/tests/test-helper.js ================================================ import Application from 'vite-with-compat/app'; import config from 'vite-with-compat/config/environment'; import * as QUnit from 'qunit'; import { setApplication } from '@ember/test-helpers'; import { setup } from 'qunit-dom'; import { start as startEmberExam } from 'ember-exam/test-support'; export async function start({ availableModules }) { setApplication(Application.create(config.APP)); 
setup(QUnit.assert); await startEmberExam({ availableModules }); } ================================================ FILE: test-apps/vite-with-compat/vite.config.mjs ================================================ import { defineConfig } from 'vite'; import { extensions, classicEmberSupport, ember } from '@embroider/vite'; import { babel } from '@rollup/plugin-babel'; export default defineConfig({ plugins: [ classicEmberSupport(), ember(), // extra plugins here babel({ babelHelpers: 'runtime', extensions, }), ], build: { rollupOptions: { output: { /** * This is super try-hard mode, but since we're debugging * with built assets, we want to have smaller chunks for easier debugging without sourcemaps. */ // manualChunks(id) { // let maybePkg = guessPkgName(id); // // return maybePkg; // }, }, }, }, }); // eslint-disable-next-line no-unused-vars function guessPkgName(id) { if (!id.includes('/')) { return id; } let parts = id.split('/node_modules/'); let significant = parts.at(-1); { let parts = significant.split('.pnpm/'); significant = parts.at(-1); } { let parts = significant.split('/'); if (parts[0] !== id) return parts[0].replace('@', ''); } if (id.includes('/-embroider')) { return 'embroider'; } return guessPkgName(id); } ================================================ FILE: testem.js ================================================ module.exports = { test_page: 'tests/index.html?hidepassed', disable_watching: true, launch_in_ci: ['Chrome'], launch_in_dev: ['Chrome'], timeout: 25, browser_args: { Chrome: { ci: [ // --no-sandbox is needed when running Chrome inside a container process.env.CI ? 
'--no-sandbox' : null, '--headless', '--disable-dev-shm-usage', '--disable-software-rasterizer', '--mute-audio', '--remote-debugging-port=0', '--window-size=1440,900', ].filter(Boolean), }, }, parallel: -1, }; ================================================ FILE: testem.multiple-test-page.js ================================================ module.exports = { framework: 'qunit', test_page: [ 'tests/index.html?hidepassed&derp=herp', 'tests/index.html?hidepassed&foo=bar', ], disable_watching: true, launch_in_ci: ['Chrome'], launch_in_dev: ['Chrome'], browser_args: { Chrome: [ '--disable-gpu', '--headless', '--remote-debugging-port=9222', '--window-size=1440,900', ], }, parallel: -1, }; ================================================ FILE: testem.no-test-page.js ================================================ module.exports = { framework: 'qunit', disable_watching: true, launch_in_ci: ['Chrome'], launch_in_dev: ['Chrome'], browser_args: { Chrome: [ '--disable-gpu', '--headless', '--remote-debugging-port=9222', '--window-size=1440,900', ], }, parallel: -1, }; ================================================ FILE: testem.simple-test-page.js ================================================ module.exports = { foo: 'bar', }; ================================================ FILE: tests/dummy/app/app.js ================================================ import Application from '@ember/application'; import Resolver from 'ember-resolver'; import loadInitializers from 'ember-load-initializers'; import config from './config/environment'; class App extends Application { modulePrefix = config.modulePrefix; podModulePrefix = config.podModulePrefix; Resolver = Resolver; } loadInitializers(App, config.modulePrefix); export default App; ================================================ FILE: tests/dummy/app/index.html ================================================ Ember Exam {{content-for "head"}} {{content-for "head-footer"}} {{content-for "body"}} {{content-for "body-footer"}} 
================================================ FILE: tests/dummy/app/router.js ================================================ import AddonDocsRouter, { docsRoute } from 'ember-cli-addon-docs/router'; import config from './config/environment'; const Router = AddonDocsRouter.extend({ location: config.locationType, rootURL: config.rootURL, }); Router.map(function () { docsRoute(this, function () { this.route('randomization'); this.route('randomization-iterator'); this.route('module-metadata'); this.route('splitting'); this.route('split-parallel'); this.route('filtering'); this.route('load-balancing'); this.route('preserve-test-name'); this.route('ember-try-and-ci'); this.route('test-suite-segmentation'); }); this.route('not-found', { path: '/*path' }); }); export default Router; ================================================ FILE: tests/dummy/app/styles/app.css ================================================ :root { --brand-primary: #751c27; } .home { padding-left: 1rem; padding-right: 1rem; max-width: 900px; margin: 2rem auto 4rem; } .home__section { margin-bottom: 2.5rem; } .home__lead { margin-top: 0.5rem; font-size: 18px; line-height: 1.5; } .max-width { max-width: 100%; } ================================================ FILE: tests/dummy/config/ember-cli-update.json ================================================ { "schemaVersion": "1.0.0", "packages": [ { "name": "ember-cli", "version": "5.5.0", "blueprints": [ { "name": "addon", "outputRepo": "https://github.com/ember-cli/ember-addon-output", "codemodsSource": "ember-addon-codemods-manifest@1", "isBaseBlueprint": true, "options": [ "--yarn", "--no-welcome" ] } ] } ] } ================================================ FILE: tests/dummy/config/ember-try.js ================================================ 'use strict'; const getChannelURL = require('ember-source-channel-url'); const { embroiderSafe, embroiderOptimized } = require('@embroider/test-setup'); const command = [ 'ember', 'exam', '--split', '3', 
'--parallel', '1', '--random', process.env.TRAVIS_PULL_REQUEST, ] .filter(Boolean) .join(' '); module.exports = async function () { return { command, usePnpm: true, scenarios: [ { name: 'ember-lts-4.8', npm: { devDependencies: { 'ember-source': '~4.8.0', }, }, }, { name: 'ember-lts-4.12', npm: { devDependencies: { 'ember-source': '~4.12.0', }, }, }, { name: 'ember-release', npm: { devDependencies: { 'ember-source': await getChannelURL('release'), }, }, }, { name: 'ember-beta', npm: { devDependencies: { 'ember-source': await getChannelURL('beta'), }, }, }, { name: 'ember-canary', npm: { devDependencies: { 'ember-source': await getChannelURL('canary'), }, }, }, embroiderSafe(), embroiderOptimized(), ], }; }; ================================================ FILE: tests/dummy/config/environment.js ================================================ 'use strict'; module.exports = function (environment) { const ENV = { modulePrefix: 'dummy', environment, rootURL: '/', locationType: 'history', EmberENV: { EXTEND_PROTOTYPES: false, FEATURES: { // Here you can enable experimental features on an ember canary build // e.g. 'with-controller': true }, }, APP: { // Here you can pass flags/options to your application instance // when it is created }, }; if (environment === 'development') { // ENV.APP.LOG_RESOLVER = true; // ENV.APP.LOG_ACTIVE_GENERATION = true; // ENV.APP.LOG_TRANSITIONS = true; // ENV.APP.LOG_TRANSITIONS_INTERNAL = true; // ENV.APP.LOG_VIEW_LOOKUPS = true; } if (environment === 'test') { // Testem prefers this... 
ENV.locationType = 'none'; // keep test console output quieter ENV.APP.LOG_ACTIVE_GENERATION = false; ENV.APP.LOG_VIEW_LOOKUPS = false; ENV.APP.rootElement = '#ember-testing'; ENV.APP.autoboot = false; } if (environment === 'production') { // Allow ember-cli-addon-docs to update the rootURL in compiled assets ENV.rootURL = 'ADDON_DOCS_ROOT_URL'; // here you can enable a production-specific feature } return ENV; }; ================================================ FILE: tests/dummy/config/optional-features.json ================================================ { "application-template-wrapper": false, "jquery-integration": false, "template-only-glimmer-components": true } ================================================ FILE: tests/dummy/config/targets.js ================================================ 'use strict'; const browsers = [ 'last 1 Chrome versions', 'last 1 Firefox versions', 'last 1 Safari versions', ]; module.exports = { browsers, }; ================================================ FILE: tests/dummy/public/crossdomain.xml ================================================ ================================================ FILE: tests/dummy/public/robots.txt ================================================ # http://www.robotstxt.org User-agent: * Disallow: ================================================ FILE: tests/index.html ================================================ Ember Exam {{content-for "head"}} {{content-for "test-head"}} {{content-for "head-footer"}} {{content-for "test-head-footer"}} {{content-for "body"}} {{content-for "test-body"}}
{{content-for "body-footer"}} {{content-for "test-body-footer"}} ================================================ FILE: tests/test-helper.js ================================================ import Application from 'dummy/app'; import config from 'dummy/config/environment'; import { setApplication } from '@ember/test-helpers'; import { start as startEmberExam } from 'ember-exam/test-support'; import { setupEmberOnerrorValidation } from 'ember-qunit'; setApplication(Application.create(config.APP)); setupEmberOnerrorValidation(); startEmberExam(); ================================================ FILE: tests/unit/async-iterator-test.js ================================================ import AsyncIterator from 'ember-exam/test-support/-private/async-iterator'; import { module, test } from 'qunit'; module('Unit | async-iterator', function (hooks) { hooks.beforeEach(function () { this.testem = { eventHandler: new Array(), emit: function (event) { const argsWithoutFirst = Array.prototype.slice.call(arguments, 1); if (this.eventHandler && this.eventHandler[event]) { let handlers = this.eventHandler[event]; for (let i = 0; i < handlers.length; i++) { handlers[i].apply(this, argsWithoutFirst); } } }, on: function (event, callBack) { if (!this.eventHandler) { this.eventHandler = {}; } if (!this.eventHandler[event]) { this.eventHandler[event] = []; } this.eventHandler[event].push(callBack); }, removeEventCallbacks: () => {}, }; }); test('should instantiate', function (assert) { let iteratorOfPromises = new AsyncIterator(this.testem, { request: 'next-module-request', response: 'next-module-response', }); assert.false(iteratorOfPromises.done); assert.deepEqual(typeof iteratorOfPromises.next, 'function'); assert.deepEqual(typeof iteratorOfPromises.dispose, 'function'); }); test('should get the value from response.', function (assert) { const done = assert.async(); this.testem.on('next-module-request', () => { this.testem.emit('next-module-response', { done: false, value: 'a', }); 
}); const iteratorOfPromises = new AsyncIterator(this.testem, { request: 'next-module-request', response: 'next-module-response', }); iteratorOfPromises.next().then((result) => { assert.deepEqual(result.value, 'a'); done(); }); }); test('should iterate promises until there is no response.', function (assert) { const done = assert.async(); const testem = this.testem; const responses = ['a', 'b', 'c']; testem.on('next-module-request', () => { testem.emit('next-module-response', { done: responses.length === 0, value: responses.shift(), }); }); const iteratorOfPromises = new AsyncIterator(testem, { request: 'next-module-request', response: 'next-module-response', }); let values = []; iteratorOfPromises .next() .then((res) => { values.push(res.value); return iteratorOfPromises.next(); }) .then((res) => { values.push(res.value); return iteratorOfPromises.next(); }) .then((res) => { values.push(res.value); assert.deepEqual(values, ['a', 'b', 'c']); done(); }); }); test('should return false after disposing', function (assert) { const iteratorOfPromises = new AsyncIterator(this.testem, { request: 'next-module-request', response: 'next-module-response', }); iteratorOfPromises.dispose(); assert.true(iteratorOfPromises.done); }); test('should dispose after iteration.', function (assert) { const done = assert.async(); const testem = this.testem; const responses = ['a', 'b', 'c']; testem.on('next-module-request', () => { testem.emit('next-module-response', { done: responses.length === 0, value: responses.shift(), }); }); const iteratorOfPromises = new AsyncIterator(testem, { request: 'next-module-request', response: 'next-module-response', }); iteratorOfPromises .next() .then((res) => { assert.false(res.done); return iteratorOfPromises.next(); }) .then((res) => { assert.false(res.done); return iteratorOfPromises.next(); }) .then((res) => { assert.false(res.done); return iteratorOfPromises.next(); }) .then((res) => { assert.true(res.done); done(); }); }); test('should resolve 
with iterator finishing if request is not handled within 2s', function (assert) { const done = assert.async(); const iteratorOfPromises = new AsyncIterator(this.testem, { request: 'next-module-request', response: 'next-module-response', timeout: 2, }); return iteratorOfPromises.next().then((res) => { assert.true(res.done); done(); }); }); test('should resolve a timeout error if request is not handled within 2s when emberExamExitOnError is true', function (assert) { const done = assert.async(); const iteratorOfPromises = new AsyncIterator(this.testem, { request: 'next-module-request', response: 'next-module-response', timeout: 2, emberExamExitOnError: true, }); return iteratorOfPromises.next().then( () => { assert.ok(false, 'Promise should not resolve, expecting reject'); done(); }, (err) => { assert.deepEqual( err.message, 'EmberExam: Promise timed out after 2 s while waiting for response for next-module-request', ); done(); }, ); }); test('should throw an error if handleResponse is invoked while not waiting for a response', function (assert) { const iteratorOfPromises = new AsyncIterator(this.testem, { request: 'next-module-request', response: 'next-module-response', }); assert.throws( () => iteratorOfPromises.handleResponse({}), /Was not expecting a response, but got a response/, ); }); }); ================================================ FILE: tests/unit/filter-test-modules-test.js ================================================ import { convertFilePathToModulePath, filterTestModules, } from 'ember-exam/test-support/-private/filter-test-modules'; import { module, test } from 'qunit'; import { setupTest } from 'ember-qunit'; module('Unit | filter-test-modules', function () { module('covertFilePathToModulePath', function (hooks) { setupTest(hooks); test('should return an input string without file extension when the input contains file extension', function (assert) { assert.strictEqual( convertFilePathToModulePath('/tests/integration/foo.js'), 
'/tests/integration/foo', ); }); test(`should return an input string without file extension when the input doesn't contain file extension`, function (assert) { assert.strictEqual( convertFilePathToModulePath('/tests/integration/foo'), '/tests/integration/foo', ); }); test('should return an input string after `tests` when the input is a full test file path', function (assert) { assert.strictEqual( convertFilePathToModulePath('dummy/tests/integration/foo.js'), '/tests/integration/foo', ); }); }); module('modulePath', function (hooks) { setupTest(hooks); hooks.beforeEach(function () { this.modules = ['foo-test', 'bar-test']; }); hooks.afterEach(function () { this.modules = []; }); test('should return a list of filtered tests', function (assert) { assert.deepEqual(['foo-test'], filterTestModules(this.modules, 'foo')); }); test('should return an empty list when there is no match', function (assert) { assert.throws( () => filterTestModules(this.modules, 'no-match'), /No tests matched with the filter:/, ); }); test('should return a list of tests matched with a regular expression', function (assert) { assert.deepEqual(['foo-test'], filterTestModules(this.modules, '/foo/')); }); test('should return a list of tests matched with a regular expression that excluse foo', function (assert) { assert.deepEqual(['bar-test'], filterTestModules(this.modules, '!/foo/')); }); test('should return a list of tests matches with a list of string options', function (assert) { assert.deepEqual( ['foo-test', 'bar-test'], filterTestModules(this.modules, 'foo, bar'), ); }); test('should return a list of unique tests matches when options are repeated', function (assert) { assert.deepEqual( ['foo-test'], filterTestModules(this.modules, 'foo, foo'), ); }); }); module('filePath', function (hooks) { setupTest(hooks); hooks.beforeEach(function () { this.modules = [ 'dummy/tests/integration/foo-test', 'dummy/tests/unit/foo-test', 'dummy/tests/unit/bar-test', ]; }); hooks.afterEach(function () { 
this.modules = []; }); test('should return a test module matched with full test file path', function (assert) { assert.deepEqual( ['dummy/tests/integration/foo-test'], filterTestModules( this.modules, null, 'app/tests/integration/foo-test.js', ), ); }); test('should return a test module matched with relative test file path', function (assert) { assert.deepEqual( ['dummy/tests/unit/foo-test'], filterTestModules(this.modules, null, '/unit/foo-test'), ); }); test('should return a test module matched with test folder path with wildcard', function (assert) { assert.deepEqual( ['dummy/tests/unit/foo-test', 'dummy/tests/unit/bar-test'], filterTestModules(this.modules, null, '/unit/*'), ); }); test('should return a test module matched with test file path with wildcard', function (assert) { assert.deepEqual( ['dummy/tests/integration/foo-test', 'dummy/tests/unit/foo-test'], filterTestModules(this.modules, null, '/tests/*/foo*'), ); }); test('should return an empty list when there is no match', function (assert) { assert.throws( () => filterTestModules(this.modules, null, 'no-match'), /No tests matched with the filter:/, ); }); test('should return a list of tests matches with a list of string options', function (assert) { assert.deepEqual( ['dummy/tests/integration/foo-test', 'dummy/tests/unit/foo-test'], filterTestModules( this.modules, null, '/tests/integration/*, dummy/tests/unit/foo-test', ), ); }); test('should return a list of unique tests matches when options are repeated', function (assert) { assert.deepEqual( ['dummy/tests/unit/bar-test', 'dummy/tests/unit/foo-test'], filterTestModules( this.modules, null, 'app/tests/unit/bar-test.js, /tests/unit/*', ), ); }); }); }); ================================================ FILE: tests/unit/multiple-edge-cases-test.js ================================================ import { module, test } from 'qunit'; module('#3: Module With Multiple Edge Case Tests', function () { test('#1 RegExp test', function (assert) { 
assert.ok(/derp/.test('derp')); }); }); ================================================ FILE: tests/unit/multiple-ember-tests-test.js ================================================ import { module, test } from 'qunit'; import { setupTest } from 'ember-qunit'; module('#1: Module-For With Multiple Tests', function (hooks) { setupTest(hooks); test('#1', function (assert) { assert.ok(true); }); test('#2', function (assert) { assert.ok(true); }); test('#3', function (assert) { assert.ok(true); }); test('#4', function (assert) { assert.ok(true); }); test('#5', function (assert) { assert.ok(true); }); test('#6', function (assert) { assert.ok(true); }); test('#7', function (assert) { assert.ok(true); }); test('#8', function (assert) { assert.ok(true); }); test('#9', function (assert) { assert.ok(true); }); }); ================================================ FILE: tests/unit/multiple-tests-test.js ================================================ import { module, test } from 'qunit'; module('#2: Module With Multiple Tests', function () { test('#1', function (assert) { assert.ok(true); }); test('#2', function (assert) { assert.ok(true); }); test('#3', function (assert) { assert.ok(true); }); test('#4', function (assert) { assert.ok(true); }); test('#5', function (assert) { assert.ok(true); }); test('#6', function (assert) { assert.ok(true); }); test('#7', function (assert) { assert.ok(true); }); test('#8', function (assert) { assert.ok(true); }); test('#9', function (assert) { assert.ok(true); }); }); ================================================ FILE: tests/unit/test-loader-test.js ================================================ import EmberExamTestLoader from 'ember-exam/test-support/-private/ember-exam-test-loader'; import { module, test } from 'qunit'; module('Unit | test-loader', function (hooks) { hooks.beforeEach(function () { this.originalRequire = window.require; this.requiredModules = []; window.require = (name) => { this.requiredModules.push(name); }; 
window.requirejs.entries = { 'test-1-test': true, 'test-2-test': true, 'test-3-test': true, 'test-4-test': true, }; this.testem = { eventQueue: new Array(), emit: function (event) { this.eventQueue.push(event); }, on: () => {}, }; this.qunit = { config: { queue: [], }, begin: () => {}, moduleDone: () => {}, testDone: () => {}, }; }); hooks.afterEach(function () { window.require = this.originalRequire; }); test('loads all test modules by default', async function (assert) { const testLoader = new EmberExamTestLoader( this.testem, new Map(), this.qunit, ); await testLoader.loadModules(); assert.deepEqual(this.requiredModules, [ 'test-1-test', 'test-2-test', 'test-3-test', 'test-4-test', ]); }); test('loads all test modules when testem object is not available', async function (assert) { const undefinedTestem = undefined; const testLoader = new EmberExamTestLoader( undefinedTestem, new Map(), this.qunit, ); await testLoader.loadModules(); assert.deepEqual(this.requiredModules, [ 'test-1-test', 'test-2-test', 'test-3-test', 'test-4-test', ]); }); test('loads modules from a specified partition', async function (assert) { const testLoader = new EmberExamTestLoader( this.testem, new Map().set('partition', 3).set('split', 4), this.qunit, ); await testLoader.loadModules(); assert.deepEqual(this.requiredModules, ['test-3-test']); }); test('loads modules from multiple specified partitions', async function (assert) { const testLoader = new EmberExamTestLoader( this.testem, new Map().set('partition', [1, 3]).set('split', 4), this.qunit, ); await testLoader.loadModules(); assert.deepEqual(this.requiredModules, ['test-1-test', 'test-3-test']); }); test('loads modules from the first partition by default', async function (assert) { const testLoader = new EmberExamTestLoader( this.testem, new Map().set('split', 4), this.qunit, ); await testLoader.loadModules(); assert.deepEqual(this.requiredModules, ['test-1-test']); }); test('handles params as strings', async function (assert) { 
const testLoader = new EmberExamTestLoader( this.testem, new Map().set('partition', 3).set('split', 4), this.qunit, ); await testLoader.loadModules(); assert.deepEqual(this.requiredModules, ['test-3-test']); }); test('throws an error if splitting less than one', async function (assert) { const testLoader = new EmberExamTestLoader( this.testem, new Map().set('split', 0), this.qunit, ); assert.rejects( testLoader.loadModules(), /You must specify a split greater than 0/, ); }); test("throws an error if partition isn't a number", async function (assert) { const testLoader = new EmberExamTestLoader( this.testem, new Map().set('split', 2).set('partition', 'foo'), this.qunit, ); assert.rejects( testLoader.loadModules(), /You must specify numbers for partition \(you specified 'foo'\)/, ); }); test("throws an error if partition isn't a number with multiple partitions", async function (assert) { const testLoader = new EmberExamTestLoader( this.testem, new Map().set('split', 2).set('partition', [1, 'foo']), this.qunit, ); assert.rejects( testLoader.loadModules(), /You must specify numbers for partition \(you specified '1,foo'\)/, ); }); test('throws an error if loading partition greater than split number', async function (assert) { const testLoader = new EmberExamTestLoader( this.testem, new Map().set('split', 2).set('partition', 3), this.qunit, ); assert.rejects( testLoader.loadModules(), /You must specify partitions numbered less than or equal to your split value/, ); }); test('throws an error if loading partition greater than split number with multiple partitions', async function (assert) { const testLoader = new EmberExamTestLoader( this.testem, new Map().set('split', 2).set('partition', [2, 3]), this.qunit, ); assert.rejects( testLoader.loadModules(), /You must specify partitions numbered less than or equal to your split value/, ); }); test('throws an error if loading partition less than one', async function (assert) { const testLoader = new EmberExamTestLoader( 
      this.testem,
      new Map().set('split', 2).set('partition', 0),
      this.qunit,
    );
    assert.rejects(
      testLoader.loadModules(),
      /You must specify partitions numbered greater than 0/,
    );
  });

  test('load works with a double-digit single partition', async function (assert) {
    // Ten modules so the string partition '10' exercises multi-digit
    // parsing rather than matching '1' by prefix.
    window.requirejs.entries = {
      'test-1-test': true,
      'test-2-test': true,
      'test-3-test': true,
      'test-4-test': true,
      'test-5-test': true,
      'test-6-test': true,
      'test-7-test': true,
      'test-8-test': true,
      'test-9-test': true,
      'test-10-test': true,
    };
    const testLoader = new EmberExamTestLoader(
      this.testem,
      new Map().set('partition', '10').set('split', 10),
      this.qunit,
    );
    await testLoader.loadModules();
    assert.deepEqual(this.requiredModules, ['test-10-test']);
  });

  // NOTE(review): test name likely meant "emits the `set-modules-queue`
  // event..." — confirm before renaming, since the name appears in
  // reported output.
  test('emit then `set-modules-queue` event when load balance option is true', async function (assert) {
    const testLoader = new EmberExamTestLoader(
      this.testem,
      new Map().set('loadBalance', true),
      this.qunit,
    );
    await testLoader.loadModules();
    assert.deepEqual(
      this.testem.eventQueue,
      ['testem:set-modules-queue'],
      'testem:set-modules-queue event was fired',
    );
  });
});
================================================ FILE: tests/unit/testem-output-test.js ================================================
// Unit tests for patch-testem-output's updateTestName(): it prefixes a
// test name with partition/browser information derived from the URL
// params map, unless `preserveTestName` is set.
import * as TestemOutput from 'ember-exam/test-support/-private/patch-testem-output';
import { module, test } from 'qunit';

module('Unit | patch-testem-output', function () {
  module('`preserveTestName` is passed', function () {
    test('does not add partition number to test name when `split` is passed', function (assert) {
      assert.deepEqual(
        TestemOutput.updateTestName(
          new Map().set('split', 2).set('preserveTestName', true),
          'test_module | test_name',
        ),
        'test_module | test_name',
      );
    });

    test('does not add partition number to test name when `split` and `partition` are passed', function (assert) {
      assert.deepEqual(
        TestemOutput.updateTestName(
          new Map()
            .set('split', 2)
            .set('partition', 2)
            .set('preserveTestName', true),
          'test_module | test_name',
        ),
        'test_module 
| test_name',
      );
    });

    test('does not add browser number to test name when `loadBalance` and `browser` are passed', function (assert) {
      assert.deepEqual(
        TestemOutput.updateTestName(
          new Map()
            .set('loadBalance', 2)
            .set('browser', 1)
            .set('preserveTestName', true),
          'test_module | test_name',
        ),
        'test_module | test_name',
      );
    });

    test('does not add partition number, browser number to test name when `split`, `partition`, `browser`, and `loadBalance` are passed', function (assert) {
      assert.deepEqual(
        TestemOutput.updateTestName(
          new Map()
            .set('split', 2)
            .set('partition', 2)
            .set('browser', 1)
            .set('loadBalance', 2)
            .set('preserveTestName', true),
          'test_module | test_name',
        ),
        'test_module | test_name',
      );
    });
  });

  // Without `preserveTestName`, updateTestName prepends
  // "Exam Partition N - " (from split/partition) and/or
  // "Browser Id N - " (from loadBalance/browser).
  module('`preserveTestName` is not passed', function () {
    test('adds partition number to test name when `split` is passed', function (assert) {
      assert.deepEqual(
        TestemOutput.updateTestName(
          new Map().set('split', 2),
          'test_module | test_name',
        ),
        'Exam Partition 1 - test_module | test_name',
      );
    });

    test('adds partition number to test name when `split` and `partition` are passed', function (assert) {
      assert.deepEqual(
        TestemOutput.updateTestName(
          new Map().set('split', 2).set('partition', 2),
          'test_module | test_name',
        ),
        'Exam Partition 2 - test_module | test_name',
      );
    });

    test('adds browser number to test name when `loadBalance` and `browser` are passed', function (assert) {
      assert.deepEqual(
        TestemOutput.updateTestName(
          new Map().set('loadBalance', 2).set('browser', 1),
          'test_module | test_name',
        ),
        'Browser Id 1 - test_module | test_name',
      );
    });

    test('adds partition number, browser number to test name when `split`, `partition`, `browser`, and `loadBalance` are passed', function (assert) {
      assert.deepEqual(
        TestemOutput.updateTestName(
          new Map()
            .set('split', 2)
            .set('partition', 2)
            .set('browser', 1)
            .set('loadBalance', 2),
          'test_module | test_name',
        ),
        'Exam Partition 2 - Browser Id 1 - test_module | test_name',
      );
    });
  });
});
================================================ FILE: tests/unit/weight-test-modules-test.js ================================================ import weightTestModules from 'ember-exam/test-support/-private/weight-test-modules'; import { module, test } from 'qunit'; module('Unit | weight-test-modules', function () { test('should sort a list of file paths by weight', function (assert) { const listOfModules = [ '/acceptance/test-1-test', '/unit/test-1-test', '/integration/test-1-test', 'test-1-test', ]; assert.deepEqual( [ '/acceptance/test-1-test', 'test-1-test', '/integration/test-1-test', '/unit/test-1-test', ], weightTestModules(listOfModules), ); }); test('should sort a list of file paths by weight and alphabetical order', function (assert) { const listOfModules = [ 'test-b-test', 'test-a-test', '/integration/test-b-test', '/integration/test-a-test', '/unit/test-b-test', '/acceptance/test-b-test', '/acceptance/test-a-test', '/unit/test-a-test', ]; assert.deepEqual( [ '/acceptance/test-a-test', '/acceptance/test-b-test', 'test-a-test', 'test-b-test', '/integration/test-a-test', '/integration/test-b-test', '/unit/test-a-test', '/unit/test-b-test', ], weightTestModules(listOfModules), ); }); });