[
  {
    "path": ".clean-publish",
    "content": "{\n    \"cleanDocs\": true\n}\n"
  },
  {
    "path": ".github/FUNDING.yml",
    "content": "open_collective: aslemammad\ngithub: [aslemammad]\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: \"npm\" # See documentation for possible values\n    directory: \"/\" # Location of package manifests\n    schedule:\n      interval: \"daily\""
  },
  {
    "path": ".github/workflows/benchmark.yml",
    "content": "on: [workflow_dispatch]\n\nname: Benchmark\n\njobs:\n  test:\n    name: Test\n    strategy:\n      fail-fast: false\n      matrix:\n        os: [ubuntu-latest, macos-latest, windows-latest]\n        node-version: [20.x, 22.x]\n\n    runs-on: ${{matrix.os}}\n    steps:\n      - uses: actions/checkout@v2\n\n      - name: Use Node.js ${{ matrix.node-version }}\n        uses: actions/setup-node@v1\n        with:\n          node-version: ${{ matrix.node-version }}\n\n      - uses: pnpm/action-setup@v2\n\n      - name: Install Dependencies\n        run: pnpm install\n\n      - name: Build\n        run: pnpm build\n\n      - name: Benchmark\n        run: pnpm bench\n"
  },
  {
    "path": ".github/workflows/nodejs.yml",
    "content": "name: CI\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n  workflow_dispatch:\n\njobs:\n  test:\n    name: Test\n    strategy:\n      fail-fast: false\n      matrix:\n        os: [ubuntu-latest, macos-latest, windows-latest]\n        node-version: [20.x, 22.x]\n\n    runs-on: ${{matrix.os}}\n    steps:\n      - uses: actions/checkout@v2\n\n      - name: Use Node.js ${{ matrix.node-version }}\n        uses: actions/setup-node@v1\n        with:\n          node-version: ${{ matrix.node-version }}\n\n      - uses: pnpm/action-setup@v2\n\n      - name: Install Dependencies\n        run: pnpm install\n\n      - name: Build\n        run: pnpm build\n\n      - name: Typecheck\n        run: pnpm typecheck\n\n      - name: Lint\n        run: pnpm lint\n\n      - name: Test\n        run: pnpm test\n"
  },
  {
    "path": ".github/workflows/publish.yml",
    "content": "name: Publish\n\non:\n  workflow_dispatch:\n    inputs:\n      release-type:\n        type: choice\n        description: Type of the release\n        options:\n          - patch\n          - minor\n          - major\n\npermissions:\n  contents: write\n  id-token: write\n\njobs:\n  publish:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n\n      - uses: pnpm/action-setup@v2\n\n      - uses: actions/setup-node@v4\n        with:\n          node-version: 22\n          registry-url: 'https://registry.npmjs.org'\n\n      # OICD requires updated npm even when pnpm is used\n      - name: Update npm\n        run: |\n          npm --version\n          npm install -g npm@latest\n          npm --version\n\n      - name: Install Dependencies\n        run: pnpm install\n\n      - name: Build\n        run: pnpm build\n\n      - name: Typecheck\n        run: pnpm typecheck\n\n      - name: Lint\n        run: pnpm lint\n\n      - name: Test\n        run: pnpm test\n\n      - name: Configure github-actions git\n        run: |\n          git config --global user.name 'github-actions'\n          git config --global user.email 'github-actions@users.noreply.github.com'\n\n      - name: Bump version\n        run: pnpm version ${{ github.event.inputs.release-type }}\n\n      - name: Push release tag\n        run: git push origin main --follow-tags\n\n      - name: Publish to npm\n        run: pnpm publish\n"
  },
  {
    "path": ".github/workflows/release-commits.yml",
    "content": "name: Publish Any Commit\n\non: [push, pull_request]\n\njobs:\n  publish:\n    name: Publish commit\n    runs-on: ubuntu-latest\n    if: github.repository == 'tinylibs/tinypool'\n\n    steps:\n      - uses: actions/checkout@v2\n\n      - name: Use Node.js 22.x\n        uses: actions/setup-node@v1\n        with:\n          node-version: 22.x\n\n      - uses: pnpm/action-setup@v2\n\n      - name: Install Dependencies\n        run: pnpm install\n\n      - name: Build\n        run: pnpm build\n\n      - run: pnpx pkg-pr-new publish --compact\n"
  },
  {
    "path": ".gitignore",
    "content": ".nyc_output\n.vscode\n.idea\nnode_modules\ndist\ncoverage\n"
  },
  {
    "path": ".npmignore",
    "content": ".github\n.nyc_output\npackage-lock.json\ncoverage\nexamples\n"
  },
  {
    "path": ".prettierrc",
    "content": "{\n  \"endOfLine\": \"auto\",\n  \"singleQuote\": true,\n  \"semi\": false,\n  \"trailingComma\": \"es5\"\n}\n"
  },
  {
    "path": ".taprc",
    "content": "check-coverage: false\ncolor: true\ncoverage: true\ncoverage-report:\n  - html\n  - text\njobs: 2\nno-browser: true\ntest-env: TS_NODE_PROJECT=test/tsconfig.json\ntest-ignore: $.\ntest-regex: ((\\/|^)(tests?|__tests?__)\\/.*|\\.(tests?|spec)|^\\/?tests?)\\.([mc]js|ts)$\ntimeout: 60\nts: true\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nWe as members, contributors, and leaders pledge to make participation in our\ncommunity a harassment-free experience for everyone, regardless of age, body\nsize, visible or invisible disability, ethnicity, sex characteristics, gender\nidentity and expression, level of experience, education, socio-economic status,\nnationality, personal appearance, race, religion, or sexual identity\nand orientation.\n\nWe pledge to act and interact in ways that contribute to an open, welcoming,\ndiverse, inclusive, and healthy community.\n\n## Our Standards\n\nExamples of behavior that contributes to a positive environment for our\ncommunity include:\n\n- Demonstrating empathy and kindness toward other people\n- Being respectful of differing opinions, viewpoints, and experiences\n- Giving and gracefully accepting constructive feedback\n- Accepting responsibility and apologizing to those affected by our mistakes,\n  and learning from the experience\n- Focusing on what is best not just for us as individuals, but for the\n  overall community\n\nExamples of unacceptable behavior include:\n\n- The use of sexualized language or imagery, and sexual attention or\n  advances of any kind\n- Trolling, insulting or derogatory comments, and personal or political attacks\n- Public or private harassment\n- Publishing others' private information, such as a physical or email\n  address, without their explicit permission\n- Other conduct which could reasonably be considered inappropriate in a\n  professional setting\n\n## Enforcement Responsibilities\n\nCommunity leaders are responsible for clarifying and enforcing our standards of\nacceptable behavior and will take appropriate and fair corrective action in\nresponse to any behavior that they deem inappropriate, threatening, offensive,\nor harmful.\n\nCommunity leaders have the right and responsibility to remove, edit, or reject\ncomments, commits, code, wiki edits, issues, 
and other contributions that are\nnot aligned to this Code of Conduct, and will communicate reasons for moderation\ndecisions when appropriate.\n\n## Scope\n\nThis Code of Conduct applies within all community spaces, and also applies when\nan individual is officially representing the community in public spaces.\nExamples of representing our community include using an official e-mail address,\nposting via an official social media account, or acting as an appointed\nrepresentative at an online or offline event.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be\nreported to the community leaders responsible for enforcement at\njasnell@gmail.com, anna@addaleax.net, or matteo.collina@gmail.com.\nAll complaints will be reviewed and investigated promptly and fairly.\n\nAll community leaders are obligated to respect the privacy and security of the\nreporter of any incident.\n\n## Enforcement Guidelines\n\nCommunity leaders will follow these Community Impact Guidelines in determining\nthe consequences for any action they deem in violation of this Code of Conduct:\n\n### 1. Correction\n\n**Community Impact**: Use of inappropriate language or other behavior deemed\nunprofessional or unwelcome in the community.\n\n**Consequence**: A private, written warning from community leaders, providing\nclarity around the nature of the violation and an explanation of why the\nbehavior was inappropriate. A public apology may be requested.\n\n### 2. Warning\n\n**Community Impact**: A violation through a single incident or series\nof actions.\n\n**Consequence**: A warning with consequences for continued behavior. No\ninteraction with the people involved, including unsolicited interaction with\nthose enforcing the Code of Conduct, for a specified period of time. This\nincludes avoiding interactions in community spaces as well as external channels\nlike social media. Violating these terms may lead to a temporary or\npermanent ban.\n\n### 3. 
Temporary Ban\n\n**Community Impact**: A serious violation of community standards, including\nsustained inappropriate behavior.\n\n**Consequence**: A temporary ban from any sort of interaction or public\ncommunication with the community for a specified period of time. No public or\nprivate interaction with the people involved, including unsolicited interaction\nwith those enforcing the Code of Conduct, is allowed during this period.\nViolating these terms may lead to a permanent ban.\n\n### 4. Permanent Ban\n\n**Community Impact**: Demonstrating a pattern of violation of community\nstandards, including sustained inappropriate behavior, harassment of an\nindividual, or aggression toward or disparagement of classes of individuals.\n\n**Consequence**: A permanent ban from any sort of public interaction within\nthe community.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage],\nversion 2.0, available at\nhttps://www.contributor-covenant.org/version/2/0/code_of_conduct.html.\n\nCommunity Impact Guidelines were inspired by [Mozilla's code of conduct\nenforcement ladder](https://github.com/mozilla/diversity).\n\n[homepage]: https://www.contributor-covenant.org\n\nFor answers to common questions about this code of conduct, see the FAQ at\nhttps://www.contributor-covenant.org/faq. Translations are available at\nhttps://www.contributor-covenant.org/translations.\n"
  },
  {
    "path": "CONTRIBUTING",
    "content": "# Piscina is an OPEN Open Source Project\n\n## What?\n\nIndividuals making significant and valuable contributions are given commit-access to the project to contribute as they see fit. This project is more like an open wiki than a standard guarded open source project.\n\n## Rules\n\nThere are a few basic ground-rules for contributors:\n\n1. **No `--force` pushes** on `master` or modifying the Git history in any way after a PR has been merged.\n1. **Non-master branches** ought to be used for ongoing work.\n1. **External API changes and significant modifications** ought to be subject to an **internal pull-request** to solicit feedback from other contributors.\n1. Internal pull-requests to solicit feedback are *encouraged* for any other non-trivial contribution but left to the discretion of the contributor.\n1. Contributors should attempt to adhere to the prevailing code-style.\n1. 100% code coverage\n1. Semantic Versioning is used.\n\n## Releases\n\nDeclaring formal releases remains the prerogative of the project maintainer.\n\n## Changes to this arrangement\n\nThis document may also be subject to pull-requests or changes by contributors where you believe you have something valuable to add or change.\n\n-----------------------------------------\n"
  },
  {
    "path": "LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2020 James M Snell and the Piscina contributors\n\nPiscina contributors listed at https://github.com/jasnell/piscina#the-team and\nin the README file.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# Tinypool - the node.js worker pool 🧵\n\n> Piscina: A fast, efficient Node.js Worker Thread Pool implementation\n\nTinypool is a fork of piscina. What we try to achieve in this library, is to eliminate some dependencies and features that our target users don't need (currently, our main user will be Vitest). Tinypool's install size (38KB) can then be smaller than Piscina's install size (6MB when Tinypool was created, Piscina has since reduced it's size to ~800KB). If you need features like [utilization](https://github.com/piscinajs/piscina#property-utilization-readonly) or OS-specific thread priority setting, [Piscina](https://github.com/piscinajs/piscina) is a better choice for you. We think that Piscina is an amazing library, and we may try to upstream some of the dependencies optimization in this fork.\n\n- ✅ Smaller install size, 38KB\n- ✅ Minimal\n- ✅ No dependencies\n- ✅ Physical cores instead of Logical cores with [physical-cpu-count](https://www.npmjs.com/package/physical-cpu-count)\n- ✅ Supports `worker_threads` and `child_process`\n- ❌ No utilization\n- ❌ No OS-specific thread priority setting\n\n- Written in TypeScript, and ESM support only. 
For Node.js 20.x and higher.\n\n_In case you need more tiny libraries like tinypool or tinyspy, please consider submitting an [RFC](https://github.com/tinylibs/rfcs)_\n\n## Example\n\n### Using `node:worker_threads`\n\n#### Basic usage\n\n```js\n// main.mjs\nimport Tinypool from 'tinypool'\n\nconst pool = new Tinypool({\n  filename: new URL('./worker.mjs', import.meta.url).href,\n})\nconst result = await pool.run({ a: 4, b: 6 })\nconsole.log(result) // Prints 10\n\n// Make sure to destroy pool once it's not needed anymore\n// This terminates all pool's idle workers\nawait pool.destroy()\n```\n\n```js\n// worker.mjs\nexport default ({ a, b }) => {\n  return a + b\n}\n```\n\n#### Main thread <-> worker thread communication\n\n<details>\n  <summary>See code</summary>\n\n```js\n// main.mjs\nimport Tinypool from 'tinypool'\nimport { MessageChannel } from 'node:worker_threads'\n\nconst pool = new Tinypool({\n  filename: new URL('./worker.mjs', import.meta.url).href,\n})\nconst { port1, port2 } = new MessageChannel()\nconst promise = pool.run({ port: port1 }, { transferList: [port1] })\n\nport2.on('message', (message) => console.log('Main thread received:', message))\nsetTimeout(() => port2.postMessage('Hello from main thread!'), 1000)\n\nawait promise\n\nport1.close()\nport2.close()\n```\n\n```js\n// worker.mjs\nexport default ({ port }) => {\n  return new Promise((resolve) => {\n    port.on('message', (message) => {\n      console.log('Worker received:', message)\n\n      port.postMessage('Hello from worker thread!')\n      resolve()\n    })\n  })\n}\n```\n\n</details>\n\n### Using `node:child_process`\n\n#### Basic usage\n\n<details>\n  <summary>See code</summary>\n\n```js\n// main.mjs\nimport Tinypool from 'tinypool'\n\nconst pool = new Tinypool({\n  runtime: 'child_process',\n  filename: new URL('./worker.mjs', import.meta.url).href,\n})\nconst result = await pool.run({ a: 4, b: 6 })\nconsole.log(result) // Prints 10\n```\n\n```js\n// worker.mjs\nexport default ({ a, 
b }) => {\n  return a + b\n}\n```\n\n</details>\n\n#### Main process <-> worker process communication\n\n<details>\n  <summary>See code</summary>\n\n```js\n// main.mjs\nimport Tinypool from 'tinypool'\n\nconst pool = new Tinypool({\n  runtime: 'child_process',\n  filename: new URL('./worker.mjs', import.meta.url).href,\n})\n\nconst messages = []\nconst listeners = []\nconst channel = {\n  onMessage: (listener) => listeners.push(listener),\n  postMessage: (message) => messages.push(message),\n}\n\nconst promise = pool.run({}, { channel })\n\n// Send message to worker\nsetTimeout(\n  () => listeners.forEach((listener) => listener('Hello from main process')),\n  1000\n)\n\n// Wait for task to finish\nawait promise\n\nconsole.log(messages)\n// [{ received: 'Hello from main process', response: 'Hello from worker' }]\n```\n\n```js\n// worker.mjs\nexport default async function run() {\n  return new Promise((resolve) => {\n    process.on('message', (message) => {\n      // Ignore Tinypool's internal messages\n      if (message?.__tinypool_worker_message__) return\n\n      process.send({ received: message, response: 'Hello from worker' })\n      resolve()\n    })\n  })\n}\n```\n\n</details>\n\n## API\n\nWe have a similar API to Piscina, so for more information, you can read Piscina's detailed [documentation](https://github.com/piscinajs/piscina#piscina---the-nodejs-worker-pool) and apply the same techniques here.\n\n### Tinypool specific APIs\n\n#### Pool constructor options\n\n- `isolateWorkers`: Disabled by default. Always starts with a fresh worker when running tasks to isolate the environment.\n- `terminateTimeout`: Disabled by default. If terminating a worker takes `terminateTimeout` amount of milliseconds to execute, an error is raised.\n- `maxMemoryLimitBeforeRecycle`: Disabled by default. When defined, the worker's heap memory usage is compared against this value after task has been finished. 
If the current memory usage exceeds this limit, worker is terminated and a new one is started to take its place. This option is useful when your tasks leak memory and you don't want to enable `isolateWorkers` option.\n- `runtime`: Used to pick worker runtime. Default value is `worker_threads`.\n  - `worker_threads`: Runs workers in [`node:worker_threads`](https://nodejs.org/api/worker_threads.html). For `main thread <-> worker thread` communication you can use [`MessagePort`](https://nodejs.org/api/worker_threads.html#class-messageport) in the `pool.run()` method's [`transferList` option](https://nodejs.org/api/worker_threads.html#portpostmessagevalue-transferlist). See [example](#main-thread---worker-thread-communication).\n  - `child_process`: Runs workers in [`node:child_process`](https://nodejs.org/api/child_process.html). For `main thread <-> worker process` communication you can use `TinypoolChannel` in the `pool.run()` method's `channel` option. For filtering out the Tinypool's internal messages see `TinypoolWorkerMessage`. See [example](#main-process---worker-process-communication).\n- `teardown`: name of the function in file that should be called before worker is terminated. Must be named exported.\n- `serialization`: Specify the kind of serialization used for the `child_process` runtime. Possible values are `'json'` and `'advanced'`. See Node.js [Advanced serialization](https://nodejs.org/docs/latest/api/child_process.html#advanced-serialization) for more details.\n\n#### Pool methods\n\n- `cancelPendingTasks()`: Gracefully cancels all pending tasks without stopping or interfering with on-going tasks. This method is useful when your tasks may have side effects and should not be terminated forcefully during task execution. 
If your tasks don't have any side effects you may want to use [`{ signal }`](https://github.com/piscinajs/piscina#cancelable-tasks) option for forcefully terminating all tasks, including the on-going ones, instead.\n- `recycleWorkers(options)`: Waits for all current tasks to finish and re-creates all workers. Can be used to force isolation imperatively even when `isolateWorkers` is disabled. Accepts `{ runtime }` option as argument.\n\n#### Exports\n\n- `workerId`: Each worker now has an id ( <= `maxThreads`) that can be imported from `tinypool` in the worker itself (or `process.__tinypool_state__.workerId`).\n\n## Authors\n\n| <a href=\"https://github.com/Aslemammad\"> <img width='150' src=\"https://avatars.githubusercontent.com/u/37929992?v=4\" /><br> Mohammad Bagher </a> |\n| ------------------------------------------------------------------------------------------------------------------------------------------------ |\n\n## Sponsors\n\nYour sponsorship can make a huge difference in continuing our work in open source!\n\n<p align=\"center\">\n  <a href=\"https://cdn.jsdelivr.net/gh/aslemammad/static/sponsors.svg\">\n    <img src='https://cdn.jsdelivr.net/gh/aslemammad/static/sponsors.svg'/>\n  </a>\n</p>\n\n## Credits\n\n[The Vitest team](https://vitest.dev/) for giving me the chance of creating and maintaining this project for vitest.\n\n[Piscina](https://github.com/piscinajs/piscina), because Tinypool is not more than a friendly fork of piscina.\n"
  },
  {
    "path": "benchmark/fixtures/add-process.mjs",
    "content": "import add from './add.mjs'\n\nprocess.on('message', (message) => {\n  process.send(add(message))\n})\n"
  },
  {
    "path": "benchmark/fixtures/add-worker.mjs",
    "content": "import { parentPort } from 'node:worker_threads'\n\nimport add from './add.mjs'\n\nparentPort.on('message', (message) => {\n  parentPort.postMessage(add(message))\n})\n"
  },
  {
    "path": "benchmark/fixtures/add.mjs",
    "content": "export default ({ a, b }) => a + b\n"
  },
  {
    "path": "benchmark/isolate-benchmark.bench.ts",
    "content": "import { bench } from 'vitest'\nimport { cpus } from 'node:os'\nimport { Worker } from 'node:worker_threads'\nimport { fork } from 'node:child_process'\nimport Tinypool, { type Options } from '../dist/index'\n\nconst THREADS = cpus().length - 1\nconst ROUNDS = THREADS * 10\nconst ITERATIONS = 100\n\nfor (const runtime of [\n  'worker_threads',\n  'child_process',\n] as Options['runtime'][]) {\n  bench(\n    `Tinypool { runtime: '${runtime}' }`,\n    async () => {\n      const pool = new Tinypool({\n        runtime,\n        filename: './benchmark/fixtures/add.mjs',\n        isolateWorkers: true,\n        minThreads: THREADS,\n        maxThreads: THREADS,\n      })\n\n      await Promise.all(\n        Array(ROUNDS)\n          .fill(0)\n          .map(() => pool.run({ a: 1, b: 2 }))\n      )\n\n      await pool.destroy()\n    },\n    { iterations: ITERATIONS }\n  )\n}\n\nfor (const { task, name } of [\n  { name: 'worker_threads', task: workerThreadTask },\n  { name: 'child_process', task: childProcessTask },\n] as const) {\n  bench(\n    `node:${name}`,\n    async () => {\n      const pool = Array(ROUNDS).fill(task)\n\n      await Promise.all(\n        Array(THREADS)\n          .fill(execute)\n          .map((_task) => _task())\n      )\n\n      async function execute() {\n        const _task = pool.shift()\n\n        if (_task) {\n          await _task()\n          return execute()\n        }\n      }\n    },\n    { iterations: ITERATIONS }\n  )\n}\n\nasync function workerThreadTask() {\n  const worker = new Worker('./benchmark/fixtures/add-worker.mjs')\n  const onMessage = new Promise<void>((resolve, reject) =>\n    worker.on('message', (sum) => (sum === 3 ? 
resolve() : reject('Not 3')))\n  )\n\n  worker.postMessage({ a: 1, b: 2 })\n  await onMessage\n\n  await worker.terminate()\n}\n\nasync function childProcessTask() {\n  const subprocess = fork('./benchmark/fixtures/add-process.mjs')\n\n  const onExit = new Promise((resolve) => subprocess.on('exit', resolve))\n  const onMessage = new Promise<void>((resolve, reject) =>\n    subprocess.on('message', (sum) => (sum === 3 ? resolve() : reject('Not 3')))\n  )\n\n  subprocess.send({ a: 1, b: 2 })\n  await onMessage\n\n  subprocess.kill()\n  await onExit\n}\n"
  },
  {
    "path": "benchmark/simple.bench.ts",
    "content": "import { bench } from 'vitest'\nimport Tinypool from '../dist/index'\n\nbench(\n  'simple',\n  async () => {\n    const pool = new Tinypool({\n      filename: './benchmark/fixtures/add.mjs',\n    })\n\n    const tasks: Promise<void>[] = []\n\n    while (pool.queueSize === 0) {\n      tasks.push(pool.run({ a: 4, b: 6 }))\n    }\n\n    await Promise.all(tasks)\n    await pool.destroy()\n  },\n  { time: 10_000 }\n)\n"
  },
  {
    "path": "eslint.config.js",
    "content": "import { readFileSync } from 'node:fs'\nimport eslint from '@eslint/js'\nimport tseslint from 'typescript-eslint'\nimport eslintPluginUnicorn from 'eslint-plugin-unicorn'\nimport eslintPluginPrettierRecommended from 'eslint-plugin-prettier/recommended'\n\nconst tsconfig = JSON.parse(readFileSync('./tsconfig.json', 'utf8'))\n\nexport default defineConfig([\n  eslint.configs.recommended,\n  ...tseslint.configs.recommended,\n  ...tseslint.configs.recommendedTypeChecked.map((config) => ({\n    ...config,\n    files: tsconfig.include,\n  })),\n  {\n    files: tsconfig.include,\n    languageOptions: {\n      parserOptions: {\n        project: true,\n        tsconfigRootDir: import.meta.dirname,\n      },\n    },\n  },\n  {\n    languageOptions: {\n      globals: {\n        process: 'readonly',\n      },\n    },\n    plugins: { unicorn: eslintPluginUnicorn },\n    rules: {\n      'unicorn/prefer-node-protocol': 'error',\n      '@typescript-eslint/no-unused-vars': [\n        'error',\n        { varsIgnorePattern: '^_' },\n      ],\n      '@typescript-eslint/consistent-type-imports': [\n        'error',\n        {\n          prefer: 'type-imports',\n          fixStyle: 'inline-type-imports',\n          disallowTypeAnnotations: false,\n        },\n      ],\n\n      // TODO: Nice-to-have rules\n      '@typescript-eslint/no-unsafe-argument': 'off',\n      '@typescript-eslint/no-unsafe-assignment': 'off',\n      '@typescript-eslint/no-explicit-any': 'off',\n      '@typescript-eslint/no-unsafe-member-access': 'off',\n      '@typescript-eslint/no-unsafe-return': 'off',\n      '@typescript-eslint/no-redundant-type-constituents': 'off',\n      '@typescript-eslint/no-non-null-asserted-optional-chain': 'off',\n      '@typescript-eslint/no-namespace': 'off',\n    },\n  },\n  {\n    files: ['**/*.test.ts'],\n    rules: {\n      '@typescript-eslint/require-await': 'off',\n    },\n  },\n  { ignores: ['dist'] },\n  eslintPluginPrettierRecommended,\n])\n\n/** @param config 
{import('eslint').Linter.Config} */\nfunction defineConfig(config) {\n  return config\n}\n"
  },
  {
    "path": "global.d.ts",
    "content": "// only for tsdown build, excluded from the final tgz\ndeclare namespace NodeJS {\n  interface Process {\n    __tinypool_state__: {\n      isTinypoolWorker: boolean\n      isWorkerThread?: boolean\n      isChildProcess?: boolean\n      workerData: any\n      workerId: number\n    }\n  }\n}\n"
  },
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"tinypool\",\n  \"type\": \"module\",\n  \"version\": \"2.1.0\",\n  \"packageManager\": \"pnpm@9.0.6\",\n  \"description\": \"A minimal and tiny Node.js Worker Thread Pool implementation, a fork of piscina, but with fewer features\",\n  \"license\": \"MIT\",\n  \"homepage\": \"https://github.com/tinylibs/tinypool#readme\",\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"https://github.com/tinylibs/tinypool.git\"\n  },\n  \"bugs\": {\n    \"url\": \"https://github.com/tinylibs/tinypool/issues\"\n  },\n  \"keywords\": [\n    \"fast\",\n    \"worker threads\",\n    \"thread pool\"\n  ],\n  \"exports\": {\n    \".\": {\n      \"types\": \"./dist/index.d.ts\",\n      \"default\": \"./dist/index.js\"\n    },\n    \"./package.json\": \"./package.json\"\n  },\n  \"main\": \"./dist/index.js\",\n  \"module\": \"./dist/index.js\",\n  \"types\": \"./dist/index.d.ts\",\n  \"files\": [\n    \"dist\"\n  ],\n  \"engines\": {\n    \"node\": \"^20.0.0 || >=22.0.0\"\n  },\n  \"scripts\": {\n    \"test\": \"vitest\",\n    \"bench\": \"vitest bench\",\n    \"dev\": \"tsdown --watch ./src\",\n    \"build\": \"tsdown\",\n    \"publish\": \"clean-publish\",\n    \"lint\": \"eslint --max-warnings=0\",\n    \"typecheck\": \"tsc --noEmit\"\n  },\n  \"devDependencies\": {\n    \"@types/node\": \"^20.12.8\",\n    \"clean-publish\": \"^3.4.4\",\n    \"eslint\": \"^9.4.0\",\n    \"eslint-config-prettier\": \"^9.1.0\",\n    \"eslint-plugin-prettier\": \"^5.1.3\",\n    \"eslint-plugin-unicorn\": \"^53.0.0\",\n    \"prettier\": \"^3.3.2\",\n    \"tsdown\": \"^0.11.3\",\n    \"typescript\": \"^5.4.5\",\n    \"typescript-eslint\": \"^7.13.0\",\n    \"vite\": \"^5.2.11\",\n    \"vitest\": \"^4.0.1\"\n  }\n}\n"
  },
  {
    "path": "src/common.ts",
    "content": "import type { MessagePort, TransferListItem } from 'node:worker_threads'\nimport type { SerializationType } from 'node:child_process'\n\n/** Channel for communicating between main thread and workers */\nexport interface TinypoolChannel {\n  /** Workers subscribing to messages */\n  onMessage?: (callback: (message: any) => void) => void\n\n  /** Called with worker's messages */\n  postMessage?: (message: any) => void\n\n  /** Called when channel can be closed */\n  onClose?: () => void\n}\n\nexport interface TinypoolWorker {\n  runtime: string\n  initialize(options: {\n    env?: Record<string, string>\n    argv?: string[]\n    execArgv?: string[]\n    resourceLimits?: any\n    workerData: TinypoolData\n    trackUnmanagedFds?: boolean\n    serialization?: SerializationType\n  }): void\n  terminate(): Promise<any>\n  postMessage(message: any, transferListItem?: TransferListItem[]): void\n  setChannel?: (channel: TinypoolChannel) => void\n  on(event: string, listener: (...args: any[]) => void): void\n  once(event: string, listener: (...args: any[]) => void): void\n  emit(event: string, ...data: any[]): void\n  ref?: () => void\n  unref?: () => void\n  threadId: number\n}\n\n/**\n * Tinypool's internal messaging between main thread and workers.\n * - Utilizers can use `__tinypool_worker_message__` property to identify\n *   these messages and ignore them.\n */\nexport interface TinypoolWorkerMessage<\n  T extends 'port' | 'pool' = 'port' | 'pool',\n> {\n  __tinypool_worker_message__: true\n  source: T\n}\n\nexport interface StartupMessage {\n  filename: string | null\n  name: string\n  port: MessagePort\n  sharedBuffer: Int32Array\n  useAtomics: boolean\n}\n\nexport interface RequestMessage {\n  taskId: number\n  task: any\n  filename: string\n  name: string\n}\n\nexport interface ReadyMessage {\n  ready: true\n}\n\nexport interface ResponseMessage {\n  taskId: number\n  result: any\n  error: unknown | null\n  usedMemory: number\n}\n\nexport interface 
TinypoolPrivateData {\n  workerId: number\n}\n\nexport type TinypoolData = [TinypoolPrivateData, any] // [{ ... }, workerData]\n\n// Internal symbol used to mark Transferable objects returned\n// by the Tinypool.move() function\nconst kMovable = Symbol('Tinypool.kMovable')\nexport const kTransferable = Symbol.for('Tinypool.transferable')\nexport const kValue = Symbol.for('Tinypool.valueOf')\nexport const kQueueOptions = Symbol.for('Tinypool.queueOptions')\n\n// True if the object implements the Transferable interface\nexport function isTransferable(value: any): boolean {\n  return (\n    value != null &&\n    typeof value === 'object' &&\n    kTransferable in value &&\n    kValue in value\n  )\n}\n\n// True if object implements Transferable and has been returned\n// by the Tinypool.move() function\nexport function isMovable(value: any): boolean {\n  return isTransferable(value) && value[kMovable] === true\n}\n\nexport function markMovable(value: object): void {\n  Object.defineProperty(value, kMovable, {\n    enumerable: false,\n    configurable: true,\n    writable: true,\n    value: true,\n  })\n}\n\nexport interface Transferable {\n  readonly [kTransferable]: object\n  readonly [kValue]: object\n}\n\nexport interface Task {\n  readonly [kQueueOptions]: object | null\n  cancel(): void\n}\n\nexport interface TaskQueue {\n  readonly size: number\n  shift(): Task | null\n  remove(task: Task): void\n  push(task: Task): void\n  cancel(): void\n}\n\nexport function isTaskQueue(value: any): boolean {\n  return (\n    typeof value === 'object' &&\n    value !== null &&\n    'size' in value &&\n    typeof value.shift === 'function' &&\n    typeof value.remove === 'function' &&\n    typeof value.push === 'function'\n  )\n}\n\nexport const kRequestCountField = 0\nexport const kResponseCountField = 1\nexport const kFieldCount = 2\n"
  },
  {
    "path": "src/entry/process.ts",
    "content": "import { stderr, stdout } from '../utils'\nimport {\n  type ReadyMessage,\n  type RequestMessage,\n  type ResponseMessage,\n  type StartupMessage,\n  type TinypoolWorkerMessage,\n} from '../common'\nimport { getHandler, throwInNextTick } from './utils'\n\ntype IncomingMessage =\n  | (StartupMessage & TinypoolWorkerMessage<'pool'>)\n  | (RequestMessage & TinypoolWorkerMessage<'port'>)\n\ntype OutgoingMessage =\n  | (ReadyMessage & TinypoolWorkerMessage<'pool'>)\n  | (ResponseMessage & TinypoolWorkerMessage<'port'>)\n\nprocess.__tinypool_state__ = {\n  isChildProcess: true,\n  isTinypoolWorker: true,\n  workerData: null,\n  workerId: Number(process.env.TINYPOOL_WORKER_ID),\n}\n\nconst memoryUsage = process.memoryUsage.bind(process)\nconst send = process.send!.bind(process)\n\nprocess.on('message', (message: IncomingMessage) => {\n  // Message was not for port or pool\n  // It's likely end-users own communication between main and worker\n  if (!message || !message.__tinypool_worker_message__) return\n\n  if (message.source === 'pool') {\n    const { filename, name } = message\n\n    ;(async function () {\n      if (filename !== null) {\n        await getHandler(filename, name)\n      }\n\n      send(\n        <OutgoingMessage>{\n          ready: true,\n          source: 'pool',\n          __tinypool_worker_message__: true,\n        },\n        () => {\n          // Ignore errors coming from closed channel\n        }\n      )\n    })().catch(throwInNextTick)\n\n    return\n  }\n\n  if (message.source === 'port') {\n    onMessage(message).catch(throwInNextTick)\n    return\n  }\n\n  throw new Error(`Unexpected TinypoolWorkerMessage ${JSON.stringify(message)}`)\n})\n\nasync function onMessage(message: IncomingMessage & { source: 'port' }) {\n  const { taskId, task, filename, name } = message\n  let response: OutgoingMessage & Pick<typeof message, 'source'>\n\n  try {\n    const handler = await getHandler(filename, name)\n    if (handler === null) {\n      
throw new Error(\n        `No handler function \"${name}\" exported from \"${filename}\"`\n      )\n    }\n    const result = await handler(task)\n    response = {\n      source: 'port',\n      __tinypool_worker_message__: true,\n      taskId,\n      result,\n      error: null,\n      usedMemory: memoryUsage().heapUsed,\n    }\n\n    // If the task used e.g. console.log(), wait for the stream to drain\n    // before potentially entering the `Atomics.wait()` loop, and before\n    // returning the result so that messages will always be printed even\n    // if the process would otherwise be ready to exit.\n    if (stdout()?.writableLength! > 0) {\n      await new Promise((resolve) => process.stdout.write('', resolve))\n    }\n    if (stderr()?.writableLength! > 0) {\n      await new Promise((resolve) => process.stderr.write('', resolve))\n    }\n  } catch (error) {\n    response = {\n      source: 'port',\n      __tinypool_worker_message__: true,\n      taskId,\n      result: null,\n      error: serializeError(error),\n      usedMemory: memoryUsage().heapUsed,\n    }\n  }\n\n  send(response)\n}\n\nfunction serializeError(error: unknown) {\n  if (error instanceof Error) {\n    return {\n      ...error,\n      name: error.name,\n      stack: error.stack,\n      message: error.message,\n    }\n  }\n\n  return String(error)\n}\n"
  },
  {
    "path": "src/entry/utils.ts",
    "content": "import { pathToFileURL } from 'node:url'\n\n// Get `import(x)` as a function that isn't transpiled to `require(x)` by\n// TypeScript for dual ESM/CJS support.\n// Load this lazily, so that there is no warning about the ESM loader being\n// experimental (on Node v12.x) until we actually try to use it.\nlet importESMCached: (specifier: string) => Promise<any> | undefined\n\nfunction getImportESM() {\n  if (importESMCached === undefined) {\n    // eslint-disable-next-line @typescript-eslint/no-implied-eval -- intentional\n    importESMCached = new Function(\n      'specifier',\n      'return import(specifier)'\n    ) as typeof importESMCached\n  }\n  return importESMCached\n}\n\n// eslint-disable-next-line @typescript-eslint/ban-types -- Intentional general type\ntype Handler = Function\nconst handlerCache: Map<string, Handler> = new Map()\n\n// Look up the handler function that we call when a task is posted.\n// This is either going to be \"the\" export from a file, or the default export.\nexport async function getHandler(\n  filename: string,\n  name: string\n): Promise<Handler | null> {\n  let handler = handlerCache.get(`${filename}/${name}`)\n  if (handler !== undefined) {\n    return handler\n  }\n\n  try {\n    const handlerModule = await import(filename)\n\n    // Check if the default export is an object, because dynamic import\n    // resolves with `{ default: { default: [Function] } }` for CJS modules.\n    handler =\n      (typeof handlerModule.default !== 'function' && handlerModule.default) ||\n      handlerModule\n\n    if (typeof handler !== 'function') {\n      handler = await (handler as any)[name]\n    }\n  } catch {\n    // Ignore error and retry import\n  }\n  if (typeof handler !== 'function') {\n    handler = await getImportESM()(pathToFileURL(filename).href)\n    if (typeof handler !== 'function') {\n      handler = await (handler as any)[name]\n    }\n  }\n  if (typeof handler !== 'function') {\n    return null\n  }\n\n  // Limit 
the handler cache size. This should not usually be an issue and is\n  // only provided for pathological cases.\n  if (handlerCache.size > 1000) {\n    const [handler] = handlerCache\n    const key = handler![0]\n    handlerCache.delete(key)\n  }\n\n  handlerCache.set(`${filename}/${name}`, handler)\n  return handler\n}\n\nexport function throwInNextTick(error: Error) {\n  process.nextTick(() => {\n    throw error\n  })\n}\n"
  },
  {
    "path": "src/entry/worker.ts",
    "content": "import {\n  parentPort,\n  type MessagePort,\n  receiveMessageOnPort,\n  workerData as tinypoolData,\n} from 'node:worker_threads'\nimport {\n  type ReadyMessage,\n  type RequestMessage,\n  type ResponseMessage,\n  type StartupMessage,\n  type TinypoolData,\n  kResponseCountField,\n  kRequestCountField,\n  isMovable,\n  kTransferable,\n  kValue,\n} from '../common'\nimport { stderr, stdout } from '../utils'\nimport { getHandler, throwInNextTick } from './utils'\n\nconst [tinypoolPrivateData, workerData] = tinypoolData as TinypoolData\n\nprocess.__tinypool_state__ = {\n  isWorkerThread: true,\n  isTinypoolWorker: true,\n  workerData: workerData,\n  workerId: tinypoolPrivateData.workerId,\n}\n\nconst memoryUsage = process.memoryUsage.bind(process)\nlet useAtomics: boolean = process.env.PISCINA_DISABLE_ATOMICS !== '1'\n\n// We should only receive this message once, when the Worker starts. It gives\n// us the MessagePort used for receiving tasks, a SharedArrayBuffer for fast\n// communication using Atomics, and the name of the default filename for tasks\n// (so we can pre-load and cache the handler).\nparentPort!.on('message', (message: StartupMessage) => {\n  useAtomics =\n    process.env.PISCINA_DISABLE_ATOMICS === '1' ? false : message.useAtomics\n\n  const { port, sharedBuffer, filename, name } = message\n\n  ;(async function () {\n    if (filename !== null) {\n      await getHandler(filename, name)\n    }\n\n    const readyMessage: ReadyMessage = { ready: true }\n    parentPort!.postMessage(readyMessage)\n\n    port.start()\n\n    port.on('message', onMessage.bind(null, port, sharedBuffer))\n    atomicsWaitLoop(port, sharedBuffer)\n  })().catch(throwInNextTick)\n})\n\nlet currentTasks: number = 0\nlet lastSeenRequestCount: number = 0\nfunction atomicsWaitLoop(port: MessagePort, sharedBuffer: Int32Array) {\n  if (!useAtomics) return\n\n  // This function is entered either after receiving the startup message, or\n  // when we are done with a task. 
In those situations, the *only* thing we\n  // expect to happen next is a 'message' on `port`.\n  // That call would come with the overhead of a C++ → JS boundary crossing,\n  // including async tracking. So, instead, if there is no task currently\n  // running, we wait for a signal from the parent thread using Atomics.wait(),\n  // and read the message from the port instead of generating an event,\n  // in order to avoid that overhead.\n  // The one catch is that this stops asynchronous operations that are still\n  // running from proceeding. Generally, tasks should not spawn asynchronous\n  // operations without waiting for them to finish, though.\n  while (currentTasks === 0) {\n    // Check whether there are new messages by testing whether the current\n    // number of requests posted by the parent thread matches the number of\n    // requests received.\n    Atomics.wait(sharedBuffer, kRequestCountField, lastSeenRequestCount)\n    lastSeenRequestCount = Atomics.load(sharedBuffer, kRequestCountField)\n\n    // We have to read messages *after* updating lastSeenRequestCount in order\n    // to avoid race conditions.\n    let entry\n    while ((entry = receiveMessageOnPort(port)) !== undefined) {\n      onMessage(port, sharedBuffer, entry.message)\n    }\n  }\n}\n\nfunction onMessage(\n  port: MessagePort,\n  sharedBuffer: Int32Array,\n  message: RequestMessage\n) {\n  currentTasks++\n  const { taskId, task, filename, name } = message\n\n  ;(async function () {\n    let response: ResponseMessage\n    let transferList: any[] = []\n    try {\n      const handler = await getHandler(filename, name)\n      if (handler === null) {\n        throw new Error(\n          `No handler function \"${name}\" exported from \"${filename}\"`\n        )\n      }\n      let result = await handler(task)\n      if (isMovable(result)) {\n        transferList = transferList.concat(result[kTransferable])\n        result = result[kValue]\n      }\n      response = {\n        taskId,\n       
 result: result,\n        error: null,\n        usedMemory: memoryUsage().heapUsed,\n      }\n\n      // If the task used e.g. console.log(), wait for the stream to drain\n      // before potentially entering the `Atomics.wait()` loop, and before\n      // returning the result so that messages will always be printed even\n      // if the process would otherwise be ready to exit.\n      if (stdout()?.writableLength! > 0) {\n        await new Promise((resolve) => process.stdout.write('', resolve))\n      }\n      if (stderr()?.writableLength! > 0) {\n        await new Promise((resolve) => process.stderr.write('', resolve))\n      }\n    } catch (error) {\n      response = {\n        taskId,\n        result: null,\n        // It may be worth taking a look at the error cloning algorithm we\n        // use in Node.js core here, it's quite a bit more flexible\n        error,\n        usedMemory: memoryUsage().heapUsed,\n      }\n    }\n    currentTasks--\n\n    // Post the response to the parent thread, and let it know that we have\n    // an additional message available. If possible, use Atomics.wait()\n    // to wait for the next message.\n    port.postMessage(response, transferList)\n    Atomics.add(sharedBuffer, kResponseCountField, 1)\n    atomicsWaitLoop(port, sharedBuffer)\n  })().catch(throwInNextTick)\n}\n"
  },
  {
    "path": "src/index.ts",
    "content": "import {\n  MessageChannel,\n  type MessagePort,\n  receiveMessageOnPort,\n} from 'node:worker_threads'\nimport type { SerializationType } from 'node:child_process'\nimport { once, EventEmitterAsyncResource } from 'node:events'\nimport { AsyncResource } from 'node:async_hooks'\nimport { fileURLToPath, URL } from 'node:url'\nimport { join } from 'node:path'\nimport { inspect, types } from 'node:util'\nimport assert from 'node:assert'\nimport { performance } from 'node:perf_hooks'\nimport { readFileSync } from 'node:fs'\nimport { availableParallelism } from 'node:os'\nimport {\n  type ReadyMessage,\n  type RequestMessage,\n  type ResponseMessage,\n  type StartupMessage,\n  kResponseCountField,\n  kRequestCountField,\n  kFieldCount,\n  type Transferable,\n  type Task,\n  type TaskQueue,\n  kQueueOptions,\n  isTransferable,\n  markMovable,\n  isMovable,\n  kTransferable,\n  kValue,\n  type TinypoolData,\n  type TinypoolWorker,\n  type TinypoolChannel,\n} from './common'\nimport ThreadWorker from './runtime/thread-worker'\nimport ProcessWorker from './runtime/process-worker'\n\ndeclare global {\n  namespace NodeJS {\n    interface Process {\n      __tinypool_state__: {\n        isTinypoolWorker: boolean\n        isWorkerThread?: boolean\n        isChildProcess?: boolean\n        workerData: any\n        workerId: number\n      }\n    }\n  }\n}\n\nconst cpuCount: number = availableParallelism()\n\ninterface AbortSignalEventTargetAddOptions {\n  once: boolean\n}\n\ninterface AbortSignalEventTarget {\n  addEventListener: (\n    name: 'abort',\n    listener: () => void,\n    options?: AbortSignalEventTargetAddOptions\n  ) => void\n  removeEventListener: (name: 'abort', listener: () => void) => void\n  aborted?: boolean\n}\ninterface AbortSignalEventEmitter {\n  off: (name: 'abort', listener: () => void) => void\n  once: (name: 'abort', listener: () => void) => void\n}\ntype AbortSignalAny = AbortSignalEventTarget | AbortSignalEventEmitter\nfunction 
onabort(abortSignal: AbortSignalAny, listener: () => void) {\n  if ('addEventListener' in abortSignal) {\n    abortSignal.addEventListener('abort', listener, { once: true })\n  } else {\n    abortSignal.once('abort', listener)\n  }\n}\nclass AbortError extends Error {\n  constructor() {\n    super('The task has been aborted')\n  }\n\n  get name() {\n    return 'AbortError'\n  }\n}\n\nclass CancelError extends Error {\n  constructor() {\n    super('The task has been cancelled')\n  }\n\n  get name() {\n    return 'CancelError'\n  }\n}\n\ntype ResourceLimits = Worker extends {\n  resourceLimits?: infer T\n}\n  ? T\n  : object\n\nclass ArrayTaskQueue implements TaskQueue {\n  tasks: Task[] = []\n\n  get size() {\n    return this.tasks.length\n  }\n\n  shift(): Task | null {\n    return this.tasks.shift() as Task\n  }\n\n  push(task: Task): void {\n    this.tasks.push(task)\n  }\n\n  remove(task: Task): void {\n    const index = this.tasks.indexOf(task)\n    assert.notStrictEqual(index, -1)\n    this.tasks.splice(index, 1)\n  }\n\n  cancel(): void {\n    while (this.tasks.length > 0) {\n      const task = this.tasks.pop()\n      task?.cancel()\n    }\n  }\n}\n\ninterface Options {\n  filename?: string | null\n  runtime?: 'worker_threads' | 'child_process'\n  name?: string\n  minThreads?: number\n  maxThreads?: number\n  idleTimeout?: number\n  terminateTimeout?: number\n  maxQueue?: number | 'auto'\n  concurrentTasksPerWorker?: number\n  useAtomics?: boolean\n  resourceLimits?: ResourceLimits\n  maxMemoryLimitBeforeRecycle?: number\n  argv?: string[]\n  execArgv?: string[]\n  env?: Record<string, string>\n  workerData?: any\n  taskQueue?: TaskQueue\n  trackUnmanagedFds?: boolean\n  isolateWorkers?: boolean\n  teardown?: string\n  serialization?: SerializationType\n}\n\ninterface FilledOptions extends Options {\n  filename: string | null\n  name: string\n  runtime: NonNullable<Options['runtime']>\n  minThreads: number\n  maxThreads: number\n  idleTimeout: number\n  
maxQueue: number\n  concurrentTasksPerWorker: number\n  useAtomics: boolean\n  taskQueue: TaskQueue\n}\n\nconst kDefaultOptions: FilledOptions = {\n  filename: null,\n  name: 'default',\n  runtime: 'worker_threads',\n  minThreads: Math.max(cpuCount / 2, 1),\n  maxThreads: cpuCount,\n  idleTimeout: 0,\n  maxQueue: Infinity,\n  concurrentTasksPerWorker: 1,\n  useAtomics: true,\n  taskQueue: new ArrayTaskQueue(),\n  trackUnmanagedFds: true,\n}\n\ninterface RunOptions {\n  transferList?: TransferList\n  channel?: TinypoolChannel\n  filename?: string | null\n  signal?: AbortSignalAny | null\n  name?: string | null\n  runtime?: Options['runtime']\n}\n\ninterface FilledRunOptions extends RunOptions {\n  transferList: TransferList | never\n  filename: string | null\n  signal: AbortSignalAny | null\n  name: string | null\n}\n\nconst kDefaultRunOptions: FilledRunOptions = {\n  transferList: undefined,\n  filename: null,\n  signal: null,\n  name: null,\n}\n\nclass DirectlyTransferable implements Transferable {\n  #value: object\n  constructor(value: object) {\n    this.#value = value\n  }\n\n  get [kTransferable](): object {\n    return this.#value\n  }\n\n  get [kValue](): object {\n    return this.#value\n  }\n}\n\nclass ArrayBufferViewTransferable implements Transferable {\n  #view: ArrayBufferView\n  constructor(view: ArrayBufferView) {\n    this.#view = view\n  }\n\n  get [kTransferable](): object {\n    return this.#view.buffer\n  }\n\n  get [kValue](): object {\n    return this.#view\n  }\n}\n\nlet taskIdCounter = 0\n\ntype TaskCallback = (err: Error, result: any) => void\n// Grab the type of `transferList` off `MessagePort`. At the time of writing,\n// only ArrayBuffer and MessagePort are valid, but let's avoid having to update\n// our types here every time Node.js adds support for more objects.\ntype TransferList = MessagePort extends {\n  postMessage(value: any, transferList: infer T): any\n}\n  ? 
T\n  : never\ntype TransferListItem = TransferList extends (infer T)[] ? T : never\n\nfunction maybeFileURLToPath(filename: string): string {\n  return filename.startsWith('file:')\n    ? fileURLToPath(new URL(filename))\n    : filename\n}\n\n// Extend AsyncResource so that async relations between posting a task and\n// receiving its result are visible to diagnostic tools.\nclass TaskInfo extends AsyncResource implements Task {\n  callback: TaskCallback\n  task: any\n  transferList: TransferList\n  channel?: TinypoolChannel\n  filename: string\n  name: string\n  taskId: number\n  abortSignal: AbortSignalAny | null\n  abortListener: (() => void) | null = null\n  workerInfo: WorkerInfo | null = null\n  created: number\n  started: number\n  cancel: () => void\n\n  constructor(\n    task: any,\n    transferList: TransferList,\n    filename: string,\n    name: string,\n    callback: TaskCallback,\n    abortSignal: AbortSignalAny | null,\n    triggerAsyncId: number,\n    channel?: TinypoolChannel\n  ) {\n    super('Tinypool.Task', { requireManualDestroy: true, triggerAsyncId })\n    this.callback = callback\n    this.task = task\n    this.transferList = transferList\n    this.cancel = () => this.callback(new CancelError(), null)\n    this.channel = channel\n\n    // If the task is a Transferable returned by\n    // Tinypool.move(), then add it to the transferList\n    // automatically\n    if (isMovable(task)) {\n      // This condition should never be hit but typescript\n      // complains if we dont do the check.\n      /* istanbul ignore if */\n      if (this.transferList == null) {\n        this.transferList = []\n      }\n      this.transferList = this.transferList.concat(task[kTransferable])\n      this.task = task[kValue]\n    }\n\n    this.filename = filename\n    this.name = name\n    this.taskId = taskIdCounter++\n    this.abortSignal = abortSignal\n    this.created = performance.now()\n    this.started = 0\n  }\n\n  releaseTask(): any {\n    const ret = 
this.task\n    this.task = null\n    return ret\n  }\n\n  done(err: unknown | null, result?: any): void {\n    this.emitDestroy() // `TaskInfo`s are used only once.\n    this.runInAsyncScope(this.callback, null, err, result)\n    // If an abort signal was used, remove the listener from it when\n    // done to make sure we do not accidentally leak.\n    if (this.abortSignal && this.abortListener) {\n      if ('removeEventListener' in this.abortSignal && this.abortListener) {\n        this.abortSignal.removeEventListener('abort', this.abortListener)\n      } else {\n        ;(this.abortSignal as AbortSignalEventEmitter).off(\n          'abort',\n          this.abortListener\n        )\n      }\n    }\n  }\n\n  get [kQueueOptions](): object | null {\n    return kQueueOptions in this.task ? this.task[kQueueOptions] : null\n  }\n}\n\nabstract class AsynchronouslyCreatedResource {\n  onreadyListeners: (() => void)[] | null = []\n\n  markAsReady(): void {\n    const listeners = this.onreadyListeners\n    assert(listeners !== null)\n    this.onreadyListeners = null\n    for (const listener of listeners) {\n      listener()\n    }\n  }\n\n  isReady(): boolean {\n    return this.onreadyListeners === null\n  }\n\n  onReady(fn: () => void) {\n    if (this.onreadyListeners === null) {\n      fn() // Zalgo is okay here.\n      return\n    }\n    this.onreadyListeners.push(fn)\n  }\n\n  abstract currentUsage(): number\n}\n\nclass AsynchronouslyCreatedResourcePool<\n  T extends AsynchronouslyCreatedResource,\n> {\n  pendingItems = new Set<T>()\n  readyItems = new Set<T>()\n  maximumUsage: number\n  onAvailableListeners: ((item: T) => void)[]\n\n  constructor(maximumUsage: number) {\n    this.maximumUsage = maximumUsage\n    this.onAvailableListeners = []\n  }\n\n  add(item: T) {\n    this.pendingItems.add(item)\n    item.onReady(() => {\n      /* istanbul ignore else */\n      if (this.pendingItems.has(item)) {\n        this.pendingItems.delete(item)\n        
this.readyItems.add(item)\n        this.maybeAvailable(item)\n      }\n    })\n  }\n\n  delete(item: T) {\n    this.pendingItems.delete(item)\n    this.readyItems.delete(item)\n  }\n\n  findAvailable(): T | null {\n    let minUsage = this.maximumUsage\n    let candidate = null\n    for (const item of this.readyItems) {\n      const usage = item.currentUsage()\n      if (usage === 0) return item\n      if (usage < minUsage) {\n        candidate = item\n        minUsage = usage\n      }\n    }\n    return candidate\n  }\n\n  *[Symbol.iterator]() {\n    yield* this.pendingItems\n    yield* this.readyItems\n  }\n\n  get size() {\n    return this.pendingItems.size + this.readyItems.size\n  }\n\n  maybeAvailable(item: T) {\n    /* istanbul ignore else */\n    if (item.currentUsage() < this.maximumUsage) {\n      for (const listener of this.onAvailableListeners) {\n        listener(item)\n      }\n    }\n  }\n\n  onAvailable(fn: (item: T) => void) {\n    this.onAvailableListeners.push(fn)\n  }\n}\n\ntype ResponseCallback = (response: ResponseMessage) => void\n\nconst Errors = {\n  ThreadTermination: () => new Error('Terminating worker thread'),\n  FilenameNotProvided: () =>\n    new Error('filename must be provided to run() or in options object'),\n  TaskQueueAtLimit: () => new Error('Task queue is at limit'),\n  NoTaskQueueAvailable: () =>\n    new Error('No task queue available and all Workers are busy'),\n}\n\nclass WorkerInfo extends AsynchronouslyCreatedResource {\n  worker: TinypoolWorker\n  workerId: number\n  freeWorkerId: () => void\n  taskInfos: Map<number, TaskInfo>\n  idleTimeout: NodeJS.Timeout | null = null\n  port: MessagePort\n  sharedBuffer: Int32Array\n  lastSeenResponseCount: number = 0\n  usedMemory?: number\n  onMessage: ResponseCallback\n  shouldRecycle?: boolean\n  filename?: string | null\n  teardown?: string\n\n  constructor(\n    worker: TinypoolWorker,\n    port: MessagePort,\n    workerId: number,\n    freeWorkerId: () => void,\n    onMessage: 
ResponseCallback,\n    filename?: string | null,\n    teardown?: string\n  ) {\n    super()\n    this.worker = worker\n    this.workerId = workerId\n    this.freeWorkerId = freeWorkerId\n    this.teardown = teardown\n    this.filename = filename\n    this.port = port\n    this.port.on('message', (message: ResponseMessage) =>\n      this._handleResponse(message)\n    )\n    this.onMessage = onMessage\n    this.taskInfos = new Map()\n    this.sharedBuffer = new Int32Array(\n      new SharedArrayBuffer(kFieldCount * Int32Array.BYTES_PER_ELEMENT)\n    )\n  }\n\n  async destroy(timeout?: number): Promise<void> {\n    let resolve: () => void\n    let reject: (err: Error) => void\n\n    const ret = new Promise<void>((res, rej) => {\n      resolve = res\n      reject = rej\n    })\n\n    if (this.teardown && this.filename) {\n      const { teardown, filename } = this\n\n      await new Promise((resolve, reject) => {\n        this.postTask(\n          new TaskInfo(\n            {},\n            [],\n            filename,\n            teardown,\n            (error, result) => (error ? reject(error) : resolve(result)),\n            null,\n            1,\n            undefined\n          )\n        )\n      })\n    }\n\n    const timer = timeout\n      ? 
setTimeout(\n          () => reject(new Error('Failed to terminate worker')),\n          timeout\n        )\n      : null\n\n    void this.worker.terminate().then(() => {\n      if (timer !== null) {\n        clearTimeout(timer)\n      }\n\n      this.port.close()\n      this.clearIdleTimeout()\n      for (const taskInfo of this.taskInfos.values()) {\n        taskInfo.done(Errors.ThreadTermination())\n      }\n      this.taskInfos.clear()\n\n      resolve()\n    })\n\n    return ret\n  }\n\n  clearIdleTimeout(): void {\n    if (this.idleTimeout !== null) {\n      clearTimeout(this.idleTimeout)\n      this.idleTimeout = null\n    }\n  }\n\n  ref(): WorkerInfo {\n    this.port.ref()\n    return this\n  }\n\n  unref(): WorkerInfo {\n    // Note: Do not call ref()/unref() on the Worker itself since that may cause\n    // a hard crash, see https://github.com/nodejs/node/pull/33394.\n    this.port.unref()\n    return this\n  }\n\n  _handleResponse(message: ResponseMessage): void {\n    this.usedMemory = message.usedMemory\n    this.onMessage(message)\n\n    if (this.taskInfos.size === 0) {\n      // No more tasks running on this Worker means it should not keep the\n      // process running.\n      this.unref()\n    }\n  }\n\n  postTask(taskInfo: TaskInfo) {\n    assert(!this.taskInfos.has(taskInfo.taskId))\n    const message: RequestMessage = {\n      task: taskInfo.releaseTask(),\n      taskId: taskInfo.taskId,\n      filename: taskInfo.filename,\n      name: taskInfo.name,\n    }\n\n    try {\n      if (taskInfo.channel) {\n        this.worker.setChannel?.(taskInfo.channel)\n      }\n      this.port.postMessage(message, taskInfo.transferList)\n    } catch (err) {\n      // This would mostly happen if e.g. 
message contains unserializable data\n      // or transferList is invalid.\n      taskInfo.done(err)\n      return\n    }\n\n    taskInfo.workerInfo = this\n    this.taskInfos.set(taskInfo.taskId, taskInfo)\n    this.ref()\n    this.clearIdleTimeout()\n\n    // Inform the worker that there are new messages posted, and wake it up\n    // if it is waiting for one.\n    Atomics.add(this.sharedBuffer, kRequestCountField, 1)\n    Atomics.notify(this.sharedBuffer, kRequestCountField, 1)\n  }\n\n  processPendingMessages() {\n    // If we *know* that there are more messages than we have received using\n    // 'message' events yet, then try to load and handle them synchronously,\n    // without the need to wait for more expensive events on the event loop.\n    // This would usually break async tracking, but in our case, we already have\n    // the extra TaskInfo/AsyncResource layer that rectifies that situation.\n    const actualResponseCount = Atomics.load(\n      this.sharedBuffer,\n      kResponseCountField\n    )\n    if (actualResponseCount !== this.lastSeenResponseCount) {\n      this.lastSeenResponseCount = actualResponseCount\n\n      let entry\n      while ((entry = receiveMessageOnPort(this.port)) !== undefined) {\n        this._handleResponse(entry.message)\n      }\n    }\n  }\n\n  isRunningAbortableTask(): boolean {\n    // If there are abortable tasks, we are running one at most per Worker.\n    if (this.taskInfos.size !== 1) return false\n    const [first] = this.taskInfos\n    const [, task] = first || []\n    return task?.abortSignal !== null\n  }\n\n  currentUsage(): number {\n    if (this.isRunningAbortableTask()) return Infinity\n    return this.taskInfos.size\n  }\n}\n\nclass ThreadPool {\n  publicInterface: Tinypool\n  workers: AsynchronouslyCreatedResourcePool<WorkerInfo>\n  workerIds: Map<number, boolean> // Map<workerId, isIdAvailable>\n  options: FilledOptions\n  taskQueue: TaskQueue\n  skipQueue: TaskInfo[] = []\n  completed: number = 0\n  start: 
number = performance.now()\n  inProcessPendingMessages: boolean = false\n  startingUp: boolean = false\n  workerFailsDuringBootstrap: boolean = false\n\n  constructor(publicInterface: Tinypool, options: Options) {\n    this.publicInterface = publicInterface\n    this.taskQueue = options.taskQueue || new ArrayTaskQueue()\n\n    const filename = options.filename\n      ? maybeFileURLToPath(options.filename)\n      : null\n    this.options = { ...kDefaultOptions, ...options, filename, maxQueue: 0 }\n    // The >= and <= could be > and < but this way we get 100 % coverage 🙃\n    if (\n      options.maxThreads !== undefined &&\n      this.options.minThreads >= options.maxThreads\n    ) {\n      this.options.minThreads = options.maxThreads\n    }\n    if (\n      options.minThreads !== undefined &&\n      this.options.maxThreads <= options.minThreads\n    ) {\n      this.options.maxThreads = options.minThreads\n    }\n    if (options.maxQueue === 'auto') {\n      this.options.maxQueue = this.options.maxThreads ** 2\n    } else {\n      this.options.maxQueue = options.maxQueue ?? 
kDefaultOptions.maxQueue\n    }\n\n    this.workerIds = new Map(\n      new Array(this.options.maxThreads).fill(0).map((_, i) => [i + 1, true])\n    )\n\n    this.workers = new AsynchronouslyCreatedResourcePool<WorkerInfo>(\n      this.options.concurrentTasksPerWorker\n    )\n    this.workers.onAvailable((w: WorkerInfo) => this._onWorkerAvailable(w))\n\n    this.startingUp = true\n    this._ensureMinimumWorkers()\n    this.startingUp = false\n  }\n  _ensureEnoughWorkersForTaskQueue(): void {\n    while (\n      this.workers.size < this.taskQueue.size &&\n      this.workers.size < this.options.maxThreads\n    ) {\n      this._addNewWorker()\n    }\n  }\n\n  _ensureMaximumWorkers(): void {\n    while (this.workers.size < this.options.maxThreads) {\n      this._addNewWorker()\n    }\n  }\n\n  _ensureMinimumWorkers(): void {\n    while (this.workers.size < this.options.minThreads) {\n      this._addNewWorker()\n    }\n  }\n\n  _addNewWorker(): void {\n    const workerIds = this.workerIds\n\n    let workerId: number\n\n    workerIds.forEach((isIdAvailable, _workerId) => {\n      if (isIdAvailable && !workerId) {\n        workerId = _workerId\n        workerIds.set(_workerId, false)\n      }\n    })\n    const tinypoolPrivateData = { workerId: workerId! }\n\n    const worker =\n      this.options.runtime === 'child_process'\n        ? 
new ProcessWorker()\n        : new ThreadWorker()\n\n    worker.initialize({\n      env: this.options.env,\n      argv: this.options.argv,\n      execArgv: this.options.execArgv,\n      resourceLimits: this.options.resourceLimits,\n      workerData: [\n        tinypoolPrivateData,\n        this.options.workerData,\n      ] as TinypoolData,\n      trackUnmanagedFds: this.options.trackUnmanagedFds,\n      serialization: this.options.serialization,\n    })\n\n    const onMessage = (message: ResponseMessage) => {\n      const { taskId, result } = message\n      // In case of success: Call the callback that was passed to `runTask`,\n      // remove the `TaskInfo` associated with the Worker, which marks it as\n      // free again.\n      const taskInfo = workerInfo.taskInfos.get(taskId)\n      workerInfo.taskInfos.delete(taskId)\n\n      // Mark worker as available if it's not about to be removed\n      if (!this.shouldRecycleWorker(taskInfo)) {\n        this.workers.maybeAvailable(workerInfo)\n      }\n\n      /* istanbul ignore if */\n      if (taskInfo === undefined) {\n        const err = new Error(\n          `Unexpected message from Worker: ${inspect(message)}`\n        )\n        this.publicInterface.emit('error', err)\n      } else {\n        taskInfo.done(message.error, result)\n      }\n\n      this._processPendingMessages()\n    }\n\n    const { port1, port2 } = new MessageChannel()\n    const workerInfo = new WorkerInfo(\n      worker,\n      port1,\n      workerId!,\n      () => workerIds.set(workerId, true),\n      onMessage,\n      this.options.filename,\n      this.options.teardown\n    )\n    if (this.startingUp) {\n      // There is no point in waiting for the initial set of Workers to indicate\n      // that they are ready, we just mark them as such from the start.\n      workerInfo.markAsReady()\n    }\n\n    const message: StartupMessage = {\n      filename: this.options.filename,\n      name: this.options.name,\n      port: port2,\n      
sharedBuffer: workerInfo.sharedBuffer,\n      useAtomics: this.options.useAtomics,\n    }\n\n    worker.postMessage(message, [port2])\n\n    worker.on('message', (message: ReadyMessage) => {\n      if (message.ready === true) {\n        port1.start()\n\n        if (workerInfo.currentUsage() === 0) {\n          workerInfo.unref()\n        }\n\n        if (!workerInfo.isReady()) {\n          workerInfo.markAsReady()\n        }\n        return\n      }\n\n      worker.emit(\n        'error',\n        new Error(`Unexpected message on Worker: ${inspect(message)}`)\n      )\n    })\n\n    worker.on('error', (err: Error) => {\n      // Work around the bug in https://github.com/nodejs/node/pull/33394\n      worker.ref = () => {}\n\n      // In case of an uncaught exception: Call the callback that was passed to\n      // `postTask` with the error, or emit an 'error' event if there is none.\n      const taskInfos = [...workerInfo.taskInfos.values()]\n      workerInfo.taskInfos.clear()\n\n      // Remove the worker from the list and potentially start a new Worker to\n      // replace the current one.\n      void this._removeWorker(workerInfo)\n\n      if (workerInfo.isReady() && !this.workerFailsDuringBootstrap) {\n        this._ensureMinimumWorkers()\n      } else {\n        // Do not start new workers over and over if they already fail during\n        // bootstrap, there's no point.\n        this.workerFailsDuringBootstrap = true\n      }\n\n      if (taskInfos.length > 0) {\n        for (const taskInfo of taskInfos) {\n          taskInfo.done(err, null)\n        }\n      } else {\n        this.publicInterface.emit('error', err)\n      }\n    })\n\n    worker.unref()\n    port1.on('close', () => {\n      // The port is only closed if the Worker stops for some reason, but we\n      // always .unref() the Worker itself. We want to receive e.g. 
'error'\n      // events on it, so we ref it once we know it's going to exit anyway.\n      worker.ref()\n    })\n\n    this.workers.add(workerInfo)\n  }\n\n  _processPendingMessages() {\n    if (this.inProcessPendingMessages || !this.options.useAtomics) {\n      return\n    }\n\n    this.inProcessPendingMessages = true\n    try {\n      for (const workerInfo of this.workers) {\n        workerInfo.processPendingMessages()\n      }\n    } finally {\n      this.inProcessPendingMessages = false\n    }\n  }\n\n  _removeWorker(workerInfo: WorkerInfo): Promise<void> {\n    workerInfo.freeWorkerId()\n\n    this.workers.delete(workerInfo)\n\n    return workerInfo.destroy(this.options.terminateTimeout)\n  }\n\n  _onWorkerAvailable(workerInfo: WorkerInfo): void {\n    while (\n      (this.taskQueue.size > 0 || this.skipQueue.length > 0) &&\n      workerInfo.currentUsage() < this.options.concurrentTasksPerWorker\n    ) {\n      // The skipQueue will have tasks that we previously shifted off\n      // the task queue but had to skip over... we have to make sure\n      // we drain that before we drain the taskQueue.\n      const taskInfo =\n        this.skipQueue.shift() || (this.taskQueue.shift() as TaskInfo)\n      // If the task has an abortSignal and the worker has any other\n      // tasks, we cannot distribute the task to it. 
Skip for now.\n      if (taskInfo.abortSignal && workerInfo.taskInfos.size > 0) {\n        this.skipQueue.push(taskInfo)\n        break\n      }\n      const now = performance.now()\n      taskInfo.started = now\n      workerInfo.postTask(taskInfo)\n      this._maybeDrain()\n      return\n    }\n\n    if (\n      workerInfo.taskInfos.size === 0 &&\n      this.workers.size > this.options.minThreads\n    ) {\n      workerInfo.idleTimeout = setTimeout(() => {\n        assert.strictEqual(workerInfo.taskInfos.size, 0)\n        if (this.workers.size > this.options.minThreads) {\n          void this._removeWorker(workerInfo)\n        }\n      }, this.options.idleTimeout).unref()\n    }\n  }\n\n  runTask(task: any, options: RunOptions): Promise<any> {\n    let { filename, name } = options\n    const { transferList = [], signal = null, channel } = options\n\n    if (filename == null) {\n      filename = this.options.filename\n    }\n    if (name == null) {\n      name = this.options.name\n    }\n    if (typeof filename !== 'string') {\n      return Promise.reject(Errors.FilenameNotProvided())\n    }\n    filename = maybeFileURLToPath(filename)\n\n    let resolve: (result: any) => void\n    let reject: (err: Error) => void\n\n    const ret = new Promise((res, rej) => {\n      resolve = res\n      reject = rej\n    })\n    const taskInfo = new TaskInfo(\n      task,\n      transferList,\n      filename,\n      name,\n      (err: Error | null, result: any) => {\n        this.completed++\n        if (err !== null) {\n          reject(err)\n        }\n\n        if (this.shouldRecycleWorker(taskInfo)) {\n          this._removeWorker(taskInfo.workerInfo!)\n            .then(() => this._ensureMinimumWorkers())\n            .then(() => this._ensureEnoughWorkersForTaskQueue())\n            .then(() => resolve(result))\n            .catch(reject)\n        } else {\n          resolve(result)\n        }\n      },\n      signal,\n      this.publicInterface.asyncResource.asyncId(),\n      
channel\n    )\n\n    if (signal !== null) {\n      // If the AbortSignal has an aborted property and it's truthy,\n      // reject immediately.\n      if ((signal as AbortSignalEventTarget).aborted) {\n        return Promise.reject(new AbortError())\n      }\n      taskInfo.abortListener = () => {\n        // Call reject() first to make sure we always reject with the AbortError\n        // if the task is aborted, not with an Error from the possible\n        // thread termination below.\n        reject(new AbortError())\n\n        if (taskInfo.workerInfo !== null) {\n          // Already running: We cancel the Worker this is running on.\n          void this._removeWorker(taskInfo.workerInfo)\n          this._ensureMinimumWorkers()\n        } else {\n          // Not yet running: Remove it from the queue.\n          this.taskQueue.remove(taskInfo)\n        }\n      }\n      onabort(signal, taskInfo.abortListener)\n    }\n\n    // If there is a task queue, there's no point in looking for an available\n    // Worker thread. 
Add this task to the queue, if possible.\n    if (this.taskQueue.size > 0) {\n      const totalCapacity = this.options.maxQueue + this.pendingCapacity()\n      if (this.taskQueue.size >= totalCapacity) {\n        if (this.options.maxQueue === 0) {\n          return Promise.reject(Errors.NoTaskQueueAvailable())\n        } else {\n          return Promise.reject(Errors.TaskQueueAtLimit())\n        }\n      } else {\n        if (this.workers.size < this.options.maxThreads) {\n          this._addNewWorker()\n        }\n        this.taskQueue.push(taskInfo)\n      }\n\n      return ret\n    }\n\n    // Look for a Worker with a minimum number of tasks it is currently running.\n    let workerInfo: WorkerInfo | null = this.workers.findAvailable()\n\n    // If we want the ability to abort this task, use only workers that have\n    // no running tasks.\n    if (workerInfo !== null && workerInfo.currentUsage() > 0 && signal) {\n      workerInfo = null\n    }\n\n    // If no Worker was found, or that Worker was handling another task in some\n    // way, and we still have the ability to spawn new threads, do so.\n    let waitingForNewWorker = false\n    if (\n      (workerInfo === null || workerInfo.currentUsage() > 0) &&\n      this.workers.size < this.options.maxThreads\n    ) {\n      this._addNewWorker()\n      waitingForNewWorker = true\n    }\n\n    // If no Worker is found, try to put the task into the queue.\n    if (workerInfo === null) {\n      if (this.options.maxQueue <= 0 && !waitingForNewWorker) {\n        return Promise.reject(Errors.NoTaskQueueAvailable())\n      } else {\n        this.taskQueue.push(taskInfo)\n      }\n\n      return ret\n    }\n\n    const now = performance.now()\n    taskInfo.started = now\n    workerInfo.postTask(taskInfo)\n    this._maybeDrain()\n\n    return ret\n  }\n\n  shouldRecycleWorker(taskInfo?: TaskInfo): boolean {\n    // Worker could be set to recycle by pool's imperative methods\n    if (taskInfo?.workerInfo?.shouldRecycle) {\n  
    return true\n    }\n\n    // When `isolateWorkers` is enabled, remove the worker after task is finished\n    if (this.options.isolateWorkers && taskInfo?.workerInfo) {\n      return true\n    }\n\n    // When `maxMemoryLimitBeforeRecycle` is enabled, remove workers that have exceeded the memory limit\n    if (\n      !this.options.isolateWorkers &&\n      this.options.maxMemoryLimitBeforeRecycle !== undefined &&\n      (taskInfo?.workerInfo?.usedMemory || 0) >\n        this.options.maxMemoryLimitBeforeRecycle\n    ) {\n      return true\n    }\n\n    return false\n  }\n\n  pendingCapacity(): number {\n    return (\n      this.workers.pendingItems.size * this.options.concurrentTasksPerWorker\n    )\n  }\n\n  _maybeDrain() {\n    if (this.taskQueue.size === 0 && this.skipQueue.length === 0) {\n      this.publicInterface.emit('drain')\n    }\n  }\n\n  async destroy() {\n    while (this.skipQueue.length > 0) {\n      const taskInfo: TaskInfo = this.skipQueue.shift() as TaskInfo\n      taskInfo.done(new Error('Terminating worker thread'))\n    }\n    while (this.taskQueue.size > 0) {\n      const taskInfo: TaskInfo = this.taskQueue.shift() as TaskInfo\n      taskInfo.done(new Error('Terminating worker thread'))\n    }\n\n    const exitEvents: Promise<any[]>[] = []\n    while (this.workers.size > 0) {\n      const [workerInfo] = this.workers\n      // @ts-expect-error -- TODO Fix\n      exitEvents.push(once(workerInfo.worker, 'exit'))\n      // @ts-expect-error -- TODO Fix\n      void this._removeWorker(workerInfo)\n    }\n\n    await Promise.all(exitEvents)\n  }\n\n  async recycleWorkers(options: Pick<Options, 'runtime'> = {}) {\n    const runtimeChanged =\n      options?.runtime && options.runtime !== this.options.runtime\n\n    if (options?.runtime) {\n      this.options.runtime = options.runtime\n    }\n\n    // Worker's are automatically recycled when isolateWorkers is enabled.\n    // Idle workers still need to be recycled if runtime changed\n    if 
(this.options.isolateWorkers && !runtimeChanged) {\n      return\n    }\n\n    const exitEvents: Promise<any[]>[] = []\n\n    Array.from(this.workers).filter((workerInfo) => {\n      // Remove idle workers\n      if (workerInfo.currentUsage() === 0) {\n        // @ts-expect-error -- TODO Fix\n        exitEvents.push(once(workerInfo.worker, 'exit'))\n        void this._removeWorker(workerInfo)\n      }\n      // Mark on-going workers for recycling.\n      // Note that we don't need to wait for these ones to finish\n      // as pool.shouldRecycleWorker will do it once task has finished\n      else {\n        workerInfo.shouldRecycle = true\n      }\n    })\n\n    await Promise.all(exitEvents)\n\n    this._ensureMinimumWorkers()\n  }\n}\n\nclass Tinypool extends EventEmitterAsyncResource {\n  #pool: ThreadPool\n\n  constructor(options: Options = {}) {\n    // convert fractional option values to int\n    if (\n      options.minThreads !== undefined &&\n      options.minThreads > 0 &&\n      options.minThreads < 1\n    ) {\n      options.minThreads = Math.max(\n        1,\n        Math.floor(options.minThreads * cpuCount)\n      )\n    }\n    if (\n      options.maxThreads !== undefined &&\n      options.maxThreads > 0 &&\n      options.maxThreads < 1\n    ) {\n      options.maxThreads = Math.max(\n        1,\n        Math.floor(options.maxThreads * cpuCount)\n      )\n    }\n\n    super({ ...options, name: 'Tinypool' })\n\n    if (\n      options.minThreads !== undefined &&\n      options.maxThreads !== undefined &&\n      options.minThreads > options.maxThreads\n    ) {\n      throw new RangeError(\n        'options.minThreads and options.maxThreads must not conflict'\n      )\n    }\n\n    this.#pool = new ThreadPool(this, options)\n  }\n\n  run(task: any, options: RunOptions = kDefaultRunOptions) {\n    const { transferList, filename, name, signal, runtime, channel } = options\n\n    return this.#pool.runTask(task, {\n      transferList,\n      filename,\n      
name,\n      signal,\n      runtime,\n      channel,\n    })\n  }\n\n  async destroy() {\n    await this.#pool.destroy()\n    this.emitDestroy()\n  }\n\n  get options(): FilledOptions {\n    return this.#pool.options\n  }\n\n  get threads(): TinypoolWorker[] {\n    const ret: TinypoolWorker[] = []\n    for (const workerInfo of this.#pool.workers) {\n      ret.push(workerInfo.worker)\n    }\n    return ret\n  }\n\n  get queueSize(): number {\n    const pool = this.#pool\n    return Math.max(pool.taskQueue.size - pool.pendingCapacity(), 0)\n  }\n\n  cancelPendingTasks() {\n    const pool = this.#pool\n    pool.taskQueue.cancel()\n  }\n\n  async recycleWorkers(options: Pick<Options, 'runtime'> = {}) {\n    await this.#pool.recycleWorkers(options)\n  }\n\n  get completed(): number {\n    return this.#pool.completed\n  }\n\n  get duration(): number {\n    return performance.now() - this.#pool.start\n  }\n\n  static get isWorkerThread(): boolean {\n    return process.__tinypool_state__?.isWorkerThread || false\n  }\n\n  static get workerData(): any {\n    return process.__tinypool_state__?.workerData || undefined\n  }\n\n  static get version(): string {\n    const { version } = JSON.parse(\n      readFileSync(join(__dirname, '../package.json'), 'utf-8')\n    ) as typeof import('../package.json')\n    return version\n  }\n\n  static move(\n    val:\n      | Transferable\n      | TransferListItem\n      | ArrayBufferView\n      | ArrayBuffer\n      | MessagePort\n  ) {\n    if (val != null && typeof val === 'object' && typeof val !== 'function') {\n      if (!isTransferable(val)) {\n        if (types.isArrayBufferView(val)) {\n          val = new ArrayBufferViewTransferable(val as ArrayBufferView)\n        } else {\n          val = new DirectlyTransferable(val)\n        }\n      }\n      markMovable(val)\n    }\n    return val\n  }\n\n  static get transferableSymbol() {\n    return kTransferable\n  }\n\n  static get valueSymbol() {\n    return kValue\n  }\n\n  static get 
queueOptionsSymbol() {\n    return kQueueOptions\n  }\n}\n\nconst _workerId = process.__tinypool_state__?.workerId\n\nexport * from './common'\nexport { Tinypool, Options, _workerId as workerId }\nexport default Tinypool\n"
  },
  {
    "path": "src/runtime/process-worker.ts",
    "content": "import { type ChildProcess, fork } from 'node:child_process'\nimport { MessagePort, type TransferListItem } from 'node:worker_threads'\nimport { fileURLToPath } from 'node:url'\nimport {\n  type TinypoolChannel,\n  type TinypoolWorker,\n  type TinypoolWorkerMessage,\n} from '../common'\n\nconst __tinypool_worker_message__ = true\nconst SIGKILL_TIMEOUT = 1000\n\nexport default class ProcessWorker implements TinypoolWorker {\n  name = 'ProcessWorker'\n  runtime = 'child_process'\n  process!: ChildProcess\n  threadId!: number\n  port?: MessagePort\n  channel?: TinypoolChannel\n  waitForExit!: Promise<void>\n  isTerminating = false\n\n  initialize(options: Parameters<TinypoolWorker['initialize']>[0]) {\n    this.process = fork(\n      fileURLToPath(import.meta.url + '/../entry/process.js'),\n      options.argv,\n      {\n        ...options,\n        stdio: 'pipe',\n        env: {\n          ...options.env,\n          TINYPOOL_WORKER_ID: options.workerData[0].workerId.toString(),\n        },\n      }\n    )\n\n    process.stdout.setMaxListeners(1 + process.stdout.getMaxListeners())\n    process.stderr.setMaxListeners(1 + process.stderr.getMaxListeners())\n    this.process.stdout?.pipe(process.stdout)\n    this.process.stderr?.pipe(process.stderr)\n\n    this.threadId = this.process.pid!\n\n    this.process.on('exit', this.onUnexpectedExit)\n    this.waitForExit = new Promise((r) => this.process.on('exit', r))\n  }\n\n  onUnexpectedExit = () => {\n    this.process.emit('error', new Error('Worker exited unexpectedly'))\n  }\n\n  async terminate() {\n    this.isTerminating = true\n    this.process.off('exit', this.onUnexpectedExit)\n\n    const sigkillTimeout = setTimeout(\n      () => this.process.kill('SIGKILL'),\n      SIGKILL_TIMEOUT\n    )\n\n    this.process.kill()\n    await this.waitForExit\n\n    this.process.stdout?.unpipe(process.stdout)\n    this.process.stderr?.unpipe(process.stderr)\n    this.port?.close()\n    this.channel?.onClose?.()\n    
clearTimeout(sigkillTimeout)\n  }\n\n  setChannel(channel: TinypoolChannel) {\n    // Previous channel exists in non-isolated runs\n    if (this.channel && this.channel !== channel) {\n      this.channel.onClose?.()\n    }\n\n    this.channel = channel\n\n    // Mirror channel's messages to process\n    this.channel.onMessage?.((message: any) => {\n      this.send(message)\n    })\n  }\n\n  private send(message: Parameters<NonNullable<(typeof process)['send']>>[0]) {\n    if (!this.isTerminating) {\n      this.process.send(message)\n    }\n  }\n\n  postMessage(message: any, transferListItem?: Readonly<TransferListItem[]>) {\n    transferListItem?.forEach((item) => {\n      if (item instanceof MessagePort) {\n        this.port = item\n        this.port.start()\n      }\n    })\n\n    // Mirror port's messages to process\n    if (this.port) {\n      this.port.on('message', (message) =>\n        this.send(<TinypoolWorkerMessage<'port'>>{\n          ...message,\n          source: 'port',\n          __tinypool_worker_message__,\n        })\n      )\n    }\n\n    return this.send(<TinypoolWorkerMessage<'pool'>>{\n      ...message,\n      source: 'pool',\n      __tinypool_worker_message__,\n    })\n  }\n\n  on(event: string, callback: (...args: any[]) => void) {\n    return this.process.on(event, (data: TinypoolWorkerMessage) => {\n      // All errors should be forwarded to the pool\n      if (event === 'error') {\n        return callback(data)\n      }\n\n      if (!data || !data.__tinypool_worker_message__) {\n        return this.channel?.postMessage?.(data)\n      }\n\n      if (data.source === 'pool') {\n        callback(data)\n      } else if (data.source === 'port') {\n        this.port!.postMessage(data)\n      }\n    })\n  }\n\n  once(event: string, callback: (...args: any[]) => void) {\n    return this.process.once(event, callback)\n  }\n\n  emit(event: string, ...data: any[]) {\n    return this.process.emit(event, ...data)\n  }\n\n  ref() {\n    return 
this.process.ref()\n  }\n\n  unref() {\n    this.port?.unref()\n\n    // The forked child_process adds event listener on `process.on('message')`.\n    // This requires manual unreffing of its channel.\n    this.process.channel?.unref?.()\n\n    if (hasUnref(this.process.stdout)) {\n      this.process.stdout.unref()\n    }\n\n    if (hasUnref(this.process.stderr)) {\n      this.process.stderr.unref()\n    }\n\n    return this.process.unref()\n  }\n}\n\n// unref is untyped for some reason\nfunction hasUnref(stream: null | object): stream is { unref: () => void } {\n  return (\n    stream != null && 'unref' in stream && typeof stream.unref === 'function'\n  )\n}\n"
  },
  {
    "path": "src/runtime/thread-worker.ts",
    "content": "import { fileURLToPath } from 'node:url'\nimport { type TransferListItem, Worker } from 'node:worker_threads'\nimport { type TinypoolWorker, type TinypoolChannel } from '../common'\n\nexport default class ThreadWorker implements TinypoolWorker {\n  name = 'ThreadWorker'\n  runtime = 'worker_threads'\n  thread!: Worker\n  threadId!: number\n  channel?: TinypoolChannel\n\n  initialize(options: Parameters<TinypoolWorker['initialize']>[0]) {\n    this.thread = new Worker(\n      fileURLToPath(import.meta.url + '/../entry/worker.js'),\n      options\n    )\n    this.threadId = this.thread.threadId\n  }\n\n  async terminate() {\n    const output = await this.thread.terminate()\n\n    this.channel?.onClose?.()\n\n    return output\n  }\n\n  postMessage(message: any, transferListItem?: Readonly<TransferListItem[]>) {\n    return this.thread.postMessage(message, transferListItem)\n  }\n\n  on(event: string, callback: (...args: any[]) => void) {\n    return this.thread.on(event, callback)\n  }\n\n  once(event: string, callback: (...args: any[]) => void) {\n    return this.thread.once(event, callback)\n  }\n\n  emit(event: string, ...data: any[]) {\n    return this.thread.emit(event, ...data)\n  }\n\n  ref() {\n    return this.thread.ref()\n  }\n\n  unref() {\n    return this.thread.unref()\n  }\n\n  setChannel(channel: TinypoolChannel) {\n    if (channel.onMessage) {\n      throw new Error(\n        \"{ runtime: 'worker_threads' } doesn't support channel.onMessage. Use transferListItem for listening to messages instead.\"\n      )\n    }\n\n    if (channel.postMessage) {\n      throw new Error(\n        \"{ runtime: 'worker_threads' } doesn't support channel.postMessage. Use transferListItem for sending to messages instead.\"\n      )\n    }\n\n    // Previous channel exists in non-isolated runs\n    if (this.channel && this.channel !== channel) {\n      this.channel.onClose?.()\n    }\n\n    this.channel = channel\n  }\n}\n"
  },
  {
    "path": "src/utils.ts",
    "content": "export function stdout(): NodeJS.WriteStream | undefined {\n  // @ts-expect-error Node.js maps process.stdout to console._stdout\n  return console._stdout || process.stdout || undefined\n}\n\nexport function stderr(): NodeJS.WriteStream | undefined {\n  // @ts-expect-error Node.js maps process.stderr to console._stderr\n  return console._stderr || process.stderr || undefined\n}\n"
  },
  {
    "path": "test/async-context.test.ts",
    "content": "import { createHook, executionAsyncId } from 'node:async_hooks'\nimport { Tinypool } from 'tinypool'\nimport { dirname, resolve } from 'node:path'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\ntest('postTask() calls the correct async hooks', async () => {\n  let taskId: number\n  let initCalls = 0\n  let beforeCalls = 0\n  let afterCalls = 0\n  let resolveCalls = 0\n\n  const hook = createHook({\n    init(id, type) {\n      if (type === 'Tinypool.Task') {\n        initCalls++\n        taskId = id\n      }\n    },\n    before(id) {\n      if (id === taskId) beforeCalls++\n    },\n    after(id) {\n      if (id === taskId) afterCalls++\n    },\n    promiseResolve() {\n      if (executionAsyncId() === taskId) resolveCalls++\n    },\n  })\n  hook.enable()\n\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n\n  await pool.run('42')\n\n  hook.disable()\n  expect(initCalls).toBe(1)\n  expect(beforeCalls).toBe(1)\n  expect(afterCalls).toBe(1)\n  expect(resolveCalls).toBe(1)\n})\n"
  },
  {
    "path": "test/atomic.test.ts",
    "content": "import Tinypool from 'tinypool'\nimport { dirname, resolve } from 'node:path'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\ntest('coverage test for Atomics optimization', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/notify-then-sleep-or.js'),\n    minThreads: 2,\n    maxThreads: 2,\n    concurrentTasksPerWorker: 2,\n  })\n\n  const tasks = []\n  let v: number\n\n  // Post 4 tasks, and wait for all of them to be ready.\n  const i32array = new Int32Array(new SharedArrayBuffer(4))\n  for (let index = 0; index < 4; index++) {\n    tasks.push(pool.run({ i32array, index }))\n  }\n\n  // Wait for 2 tasks to enter 'wait' state.\n  do {\n    v = Atomics.load(i32array, 0)\n    if (popcount8(v) >= 2) break\n    Atomics.wait(i32array, 0, v)\n  } while (true) // eslint-disable-line no-constant-condition -- intentional\n\n  // The check above could also be !== 2 but it's hard to get things right\n  // sometimes and this gives us a nice assertion. Basically, at this point\n  // exactly 2 tasks should be in Atomics.wait() state.\n  expect(popcount8(v)).toBe(2)\n  // Wake both tasks up as simultaneously as possible. The other 2 tasks should\n  // then start executing.\n  Atomics.store(i32array, 0, 0)\n  Atomics.notify(i32array, 0, Infinity)\n\n  // Wait for the other 2 tasks to enter 'wait' state.\n  do {\n    v = Atomics.load(i32array, 0)\n    if (popcount8(v) >= 2) break\n    Atomics.wait(i32array, 0, v)\n  } while (true) // eslint-disable-line no-constant-condition -- intentional\n\n  // At this point, the first two tasks are definitely finished and have\n  // definitely posted results back to the main thread, and the main thread\n  // has definitely not received them yet, meaning that the Atomics check will\n  // be used. 
Making sure that that works is the point of this test.\n\n  // Wake up the remaining 2 tasks in order to make sure that the test finishes.\n  // Do the same consistency check beforehand as above.\n  expect(popcount8(v)).toBe(2)\n  Atomics.store(i32array, 0, 0)\n  Atomics.notify(i32array, 0, Infinity)\n\n  await Promise.all(tasks)\n})\n\n// Inefficient but straightforward 8-bit popcount\nfunction popcount8(v: number): number {\n  v &= 0xff\n  if (v & 0b11110000) return popcount8(v >>> 4) + popcount8(v & 0b00001111)\n  if (v & 0b00001100) return popcount8(v >>> 2) + popcount8(v & 0b00000011)\n  if (v & 0b00000010) return popcount8(v >>> 1) + popcount8(v & 0b00000001)\n  return v\n}\n\ntest('avoids unbounded recursion', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/simple-isworkerthread.js'),\n    minThreads: 2,\n    maxThreads: 2,\n  })\n\n  const tasks = []\n  for (let i = 1; i <= 10000; i++) {\n    tasks.push(pool.run(null))\n  }\n\n  await Promise.all(tasks)\n})\n"
  },
  {
    "path": "test/fixtures/child_process-communication.mjs",
    "content": "export default async function run(task) {\n  let resolve = () => {}\n  const promise = new Promise((r) => (resolve = r))\n\n  process.send('Child process started')\n\n  process.on('message', (message) => {\n    process.send({ received: message, response: 'Hello from worker' })\n    resolve({ received: task, response: 'Hello from worker' })\n  })\n\n  return promise\n}\n"
  },
  {
    "path": "test/fixtures/esm-export.mjs",
    "content": "export default function (code) {\n  return eval(code)\n}\n"
  },
  {
    "path": "test/fixtures/eval.js",
    "content": "export default function (code) {\n  return eval(code)\n}\n"
  },
  {
    "path": "test/fixtures/isolated.js",
    "content": "let count = 0\n\nexport default () => count++\n"
  },
  {
    "path": "test/fixtures/leak-memory.js",
    "content": "/** Enable to see memory leak logging */\nconst logOutput = false\n\n// eslint-disable-next-line prefer-const -- intentional\nexport let leaks = []\n\n/**\n * Leak some memory to test memory limit usage.\n * The argument `bytes` is not 100% accurate of the leaked bytes but good enough.\n */\nexport default function run(bytes) {\n  const before = process.memoryUsage().heapUsed\n\n  for (const _ of Array(bytes).fill()) {\n    leaks.push(new SharedArrayBuffer(1024))\n  }\n  const after = process.memoryUsage().heapUsed\n  const diff = after - before\n\n  if (logOutput) {\n    console.log(`Leaked: ${diff}. Heap used: ${process.memoryUsage().heapUsed}`)\n  }\n}\n"
  },
  {
    "path": "test/fixtures/move.js",
    "content": "import Tinypool from '../../dist/index.js'\nimport assert from 'node:assert'\nimport { types } from 'node:util'\n\nexport default function (moved) {\n  if (moved !== undefined) {\n    assert(types.isAnyArrayBuffer(moved))\n  }\n  return Tinypool.move(new ArrayBuffer(10))\n}\n"
  },
  {
    "path": "test/fixtures/multiple.js",
    "content": "'use strict'\n\nexport function a() {\n  return 'a'\n}\n\nexport function b() {\n  return 'b'\n}\n\nexport default a\n"
  },
  {
    "path": "test/fixtures/nested-pool.mjs",
    "content": "import { cpus } from 'node:os'\nimport { Tinypool } from 'tinypool'\n\nexport default async function nestedPool() {\n  const pool = new Tinypool({\n    filename: new URL(import.meta.url, import.meta.url).href,\n    runtime: 'child_process',\n    isolateWorkers: true,\n    minThreads: cpus().length - 1,\n    maxThreads: cpus().length - 1,\n  })\n\n  await Promise.resolve()\n  void pool.recycleWorkers()\n}\n\nexport function entrypoint() {}\n"
  },
  {
    "path": "test/fixtures/notify-then-sleep-or.js",
    "content": "// Set the index-th bith in i32array[0], then wait for it to be un-set again.\nexport default function ({ i32array, index }) {\n  Atomics.or(i32array, 0, 1 << index)\n  Atomics.notify(i32array, 0, Infinity)\n  do {\n    const v = Atomics.load(i32array, 0)\n    if (!(v & (1 << index))) break\n    Atomics.wait(i32array, 0, v)\n  } while (true) // eslint-disable-line no-constant-condition -- intentional\n}\n"
  },
  {
    "path": "test/fixtures/resource-limits.js",
    "content": "'use strict'\n\nexport default () => {\n  const array = []\n  while (true) {\n    array.push([array])\n  }\n}\n"
  },
  {
    "path": "test/fixtures/simple-isworkerthread.js",
    "content": "import Tinypool from '../../dist/index.js'\nimport assert from 'node:assert'\n\nassert.strictEqual(Tinypool.isWorkerThread, true)\n\nexport default function () {\n  return 'done'\n}\n"
  },
  {
    "path": "test/fixtures/simple-workerdata.js",
    "content": "import Tinypool from '../../dist/index.js'\nimport assert from 'node:assert'\n\nassert.strictEqual(Tinypool.workerData, 'ABC')\n\nexport default function () {\n  return 'done'\n}\n"
  },
  {
    "path": "test/fixtures/sleep.js",
    "content": "import { promisify } from 'node:util'\nconst sleep = promisify(setTimeout)\n\nconst buf = new Uint32Array(new SharedArrayBuffer(4))\n\nexport default async ({ time = 100, a }) => {\n  await sleep(time)\n  const ret = Atomics.exchange(buf, 0, a)\n  return ret\n}\n"
  },
  {
    "path": "test/fixtures/stdio.mjs",
    "content": "export default function run() {\n  process.stdout.write('Worker message')\n  process.stderr.write('Worker error')\n}\n"
  },
  {
    "path": "test/fixtures/teardown.mjs",
    "content": "import { setTimeout } from 'node:timers/promises'\n\nlet state = 0\n\n/** @type {import(\"node:worker_threads\").MessagePort } */\nlet port\n\nexport default function task(options) {\n  port ||= options?.port\n  state++\n\n  return `Output of task #${state}`\n}\n\nexport async function namedTeardown() {\n  await setTimeout(50)\n\n  port?.postMessage(`Teardown of task #${state}`)\n}\n"
  },
  {
    "path": "test/fixtures/wait-for-notify.js",
    "content": "export default function (i32array) {\n  Atomics.wait(i32array, 0, 0)\n  Atomics.store(i32array, 0, -1)\n  Atomics.notify(i32array, 0, Infinity)\n}\n"
  },
  {
    "path": "test/fixtures/wait-for-others.js",
    "content": "import { threadId } from 'node:worker_threads'\n\nexport default function ([i32array, n]) {\n  Atomics.add(i32array, 0, 1)\n  Atomics.notify(i32array, 0, Infinity)\n  let lastSeenValue\n  while ((lastSeenValue = Atomics.load(i32array, 0)) < n) {\n    Atomics.wait(i32array, 0, lastSeenValue)\n  }\n  return threadId\n}\n"
  },
  {
    "path": "test/fixtures/workerId.js",
    "content": "import { workerId } from '../../dist/index.js'\n\nexport default async ({ slow }) => {\n  if (slow) {\n    await new Promise((res) => setTimeout(res, 300))\n  }\n\n  return workerId\n}\n"
  },
  {
    "path": "test/globals.test.ts",
    "content": "import * as path from 'node:path'\nimport { fileURLToPath } from 'node:url'\nimport { Tinypool } from 'tinypool'\n\nconst __dirname = path.dirname(fileURLToPath(import.meta.url))\n\ndescribe.each(['worker_threads', 'child_process'] as const)('%s', (runtime) => {\n  test(\"doesn't hang when process is overwritten\", async () => {\n    const pool = createPool({ runtime })\n\n    const result = await pool.run(`\n    (async () => {\n      return new Promise(resolve => {\n        globalThis.process = { exit: resolve };\n        process.exit(\"exit() from overwritten process\");\n      });\n    })();\n    `)\n    expect(result).toBe('exit() from overwritten process')\n  })\n})\n\nfunction createPool(options: Partial<Tinypool['options']>) {\n  const pool = new Tinypool({\n    filename: path.resolve(__dirname, 'fixtures/eval.js'),\n    minThreads: 1,\n    maxThreads: 1,\n    ...options,\n  })\n\n  return pool\n}\n"
  },
  {
    "path": "test/idle-timeout.test.ts",
    "content": "import { promisify } from 'node:util'\nimport { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\nconst delay = promisify(setTimeout)\ntest('idle timeout will let go of threads early', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/wait-for-others.js'),\n    idleTimeout: 500,\n    minThreads: 1,\n    maxThreads: 2,\n  })\n\n  expect(pool.threads.length).toBe(1)\n  const buffer = new Int32Array(new SharedArrayBuffer(4))\n\n  const firstTasks = [pool.run([buffer, 2]), pool.run([buffer, 2])]\n  expect(pool.threads.length).toBe(2)\n\n  const earlyThreadIds = await Promise.all(firstTasks)\n  expect(pool.threads.length).toBe(2)\n\n  await delay(2000)\n  expect(pool.threads.length).toBe(1)\n\n  const secondTasks = [pool.run([buffer, 4]), pool.run([buffer, 4])]\n  expect(pool.threads.length).toBe(2)\n\n  const lateThreadIds = await Promise.all(secondTasks)\n\n  // One thread should have been idle in between and exited, one should have\n  // been reused.\n  expect(earlyThreadIds.length).toBe(2)\n  expect(lateThreadIds.length).toBe(2)\n  expect(new Set([...earlyThreadIds, ...lateThreadIds]).size).toBe(3)\n})\n"
  },
  {
    "path": "test/isolation.test.ts",
    "content": "import { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\ndescribe.each(['worker_threads', 'child_process'] as const)('%s', (runtime) => {\n  test('idle workers can be recycled', async () => {\n    const pool = new Tinypool({\n      runtime,\n      filename: resolve(__dirname, 'fixtures/sleep.js'),\n      minThreads: 4,\n      maxThreads: 4,\n      isolateWorkers: false,\n    })\n\n    function getThreadIds() {\n      return pool.threads.map((thread) => thread.threadId).sort((a, b) => a - b)\n    }\n\n    expect(pool.threads).toHaveLength(4)\n    const initialThreadIds = getThreadIds()\n\n    await Promise.all(times(4)(() => pool.run({})))\n    expect(getThreadIds()).toStrictEqual(initialThreadIds)\n\n    await pool.recycleWorkers()\n    expect(pool.threads).toHaveLength(4)\n\n    const newThreadIds = getThreadIds()\n    initialThreadIds.forEach((id) => expect(newThreadIds).not.toContain(id))\n\n    await Promise.all(times(4)(() => pool.run({})))\n    initialThreadIds.forEach((id) => expect(newThreadIds).not.toContain(id))\n    expect(getThreadIds()).toStrictEqual(newThreadIds)\n  })\n\n  test('running workers can recycle after task execution finishes', async () => {\n    const pool = new Tinypool({\n      runtime,\n      filename: resolve(__dirname, 'fixtures/sleep.js'),\n      minThreads: 4,\n      maxThreads: 4,\n      isolateWorkers: false,\n    })\n\n    function getThreadIds() {\n      return pool.threads.map((thread) => thread.threadId).sort((a, b) => a - b)\n    }\n\n    expect(pool.threads).toHaveLength(4)\n    const initialThreadIds = getThreadIds()\n\n    const tasks = [\n      ...times(2)(() => pool.run({ time: 1 })),\n      ...times(2)(() => pool.run({ time: 2000 })),\n    ]\n\n    // Wait for first two tasks to finish\n    await Promise.all(tasks.slice(0, 2))\n\n    await pool.recycleWorkers()\n    const 
threadIds = getThreadIds()\n\n    // Idle workers should have been recycled immediately\n    // Running workers should not have recycled yet\n    expect(intersection(threadIds, initialThreadIds)).toHaveLength(2)\n\n    await Promise.all(tasks)\n\n    // All workers should have recycled now\n    const newThreadIds = getThreadIds()\n    initialThreadIds.forEach((id) => expect(newThreadIds).not.toContain(id))\n  })\n})\n\nfunction times(count: number) {\n  return function run<T>(fn: () => T): T[] {\n    return Array(count).fill(0).map(fn)\n  }\n}\n\nfunction intersection<T>(a: T[], b: T[]) {\n  return a.filter((value) => b.includes(value))\n}\n"
  },
  {
    "path": "test/move.test.ts",
    "content": "import { Tinypool, isMovable, markMovable, isTransferable } from 'tinypool'\nimport { types } from 'node:util'\nimport { MessageChannel, MessagePort } from 'node:worker_threads'\nimport { dirname, resolve } from 'node:path'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\nconst transferableSymbol = Tinypool.transferableSymbol as never\nconst valueSymbol = Tinypool.valueSymbol as never\n\ntest('Marking an object as movable works as expected', async () => {\n  const obj: any = {\n    get [transferableSymbol](): object {\n      return {}\n    },\n    get [valueSymbol](): object {\n      return {}\n    },\n  }\n  expect(isTransferable(obj)).toBe(true)\n  expect(!isMovable(obj)).toBe(true) // It's not movable initially\n  markMovable(obj)\n  expect(isMovable(obj)).toBe(true) // It is movable now\n})\n\ntest('Marking primitives and null works as expected', async () => {\n  expect(Tinypool.move(null!)).toBe(null)\n  expect(Tinypool.move(1 as any)).toBe(1)\n  expect(Tinypool.move(false as any)).toBe(false)\n  expect(Tinypool.move('test' as any)).toBe('test')\n})\n\ntest('Using Tinypool.move() returns a movable object', async () => {\n  const obj: any = {\n    get [transferableSymbol](): object {\n      return {}\n    },\n    get [valueSymbol](): object {\n      return {}\n    },\n  }\n  expect(!isMovable(obj)).toBe(true) // It's not movable initially\n  const movable = Tinypool.move(obj)\n  expect(isMovable(movable)).toBe(true) // It is movable now\n})\n\ntest('Using ArrayBuffer works as expected', async () => {\n  const ab = new ArrayBuffer(5)\n  const movable = Tinypool.move(ab)\n  expect(isMovable(movable)).toBe(true)\n  expect(types.isAnyArrayBuffer(movable[valueSymbol])).toBe(true)\n  expect(types.isAnyArrayBuffer(movable[transferableSymbol])).toBe(true)\n  expect(movable[transferableSymbol]).toEqual(ab)\n})\n\ntest('Using TypedArray works as expected', async () => {\n  const ab = new 
Uint8Array(5)\n  const movable = Tinypool.move(ab)\n  expect(isMovable(movable)).toBe(true)\n  expect(types.isArrayBufferView(movable[valueSymbol])).toBe(true)\n  expect(types.isAnyArrayBuffer(movable[transferableSymbol])).toBe(true)\n  expect(movable[transferableSymbol]).toEqual(ab.buffer)\n})\n\ntest('Using MessagePort works as expected', async () => {\n  const mc = new MessageChannel()\n  const movable = Tinypool.move(mc.port1)\n  expect(isMovable(movable)).toBe(true)\n  expect((movable[valueSymbol] as unknown) instanceof MessagePort).toBe(true)\n  expect((movable[transferableSymbol] as unknown) instanceof MessagePort).toBe(\n    true\n  )\n  expect(movable[transferableSymbol]).toEqual(mc.port1)\n})\n\ntest('Moving works', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/move.js'),\n  })\n\n  {\n    const ab = new ArrayBuffer(10)\n    const ret = await pool.run(Tinypool.move(ab))\n    expect(ab.byteLength).toBe(0) // It was moved\n    expect(types.isAnyArrayBuffer(ret)).toBe(true)\n  }\n\n  {\n    // Test with empty transferList\n    const ab = new ArrayBuffer(10)\n    const ret = await pool.run(Tinypool.move(ab), { transferList: [] })\n    expect(ab.byteLength).toBe(0) // It was moved\n    expect(types.isAnyArrayBuffer(ret)).toBe(true)\n  }\n\n  {\n    // Test with empty transferList\n    const ab = new ArrayBuffer(10)\n    const ret = await pool.run(Tinypool.move(ab))\n    expect(ab.byteLength).toBe(0) // It was moved\n    expect(types.isAnyArrayBuffer(ret)).toBe(true)\n  }\n\n  {\n    // Test with empty transferList\n    const ab = new ArrayBuffer(10)\n    const ret = await pool.run(Tinypool.move(ab), { transferList: [] })\n    expect(ab.byteLength).toBe(0) // It was moved\n    expect(types.isAnyArrayBuffer(ret)).toBe(true)\n  }\n})\n"
  },
  {
    "path": "test/options.test.ts",
    "content": "import { expect, test, vi } from 'vitest'\n\nlet Tinypool: typeof import('tinypool').default\nconst cpuCount = vi.hoisted(() => 100)\n\nbeforeAll(async () => {\n  vi.resetModules()\n  Tinypool = (await import('tinypool')).default\n})\n\ntest('fractional thread limits can be set', async () => {\n  const min = 0.5\n  const max = 0.75\n  const p = new Tinypool({\n    minThreads: min,\n    maxThreads: max,\n  })\n\n  expect(p.options.minThreads).toBe(cpuCount * min)\n  expect(p.options.maxThreads).toBe(cpuCount * max)\n})\n\ntest('fractional thread limits result is 1 for very low fractions', async () => {\n  const min = 0.00005\n  const max = 0.00006\n  const p = new Tinypool({\n    minThreads: min,\n    maxThreads: max,\n  })\n\n  expect(p.options.minThreads).toBe(1)\n  expect(p.options.maxThreads).toBe(1)\n})\n\ntest('fractional thread limits in the wrong order throw an error', async () => {\n  expect(() => {\n    new Tinypool({\n      minThreads: 0.75,\n      maxThreads: 0.25,\n    })\n  }).toThrow()\n  expect(() => {\n    new Tinypool({\n      minThreads: 0.75,\n      maxThreads: 1,\n    })\n  }).toThrow()\n})\n\nvi.mock(import('node:os'), async (importOriginal) => {\n  const original = await importOriginal()\n  return {\n    ...original,\n    availableParallelism: () => cpuCount,\n  }\n})\n\nvi.mock(import('node:child_process'), async (importOriginal) => {\n  const original = await importOriginal()\n  return {\n    ...original,\n    default: { ...original.default, execSync: () => cpuCount as any },\n  }\n})\n"
  },
  {
    "path": "test/pool-destroy.test.ts",
    "content": "import { createHook } from 'node:async_hooks'\nimport { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\ntest('can destroy pool while tasks are running', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n  setImmediate(() => void pool.destroy())\n  await expect(pool.run('while(1){}')).rejects.toThrow(\n    /Terminating worker thread/\n  )\n})\n\ntest('destroy after initializing should work (#43)', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/sleep.js'),\n    isolateWorkers: true,\n  })\n\n  const promise = expect(pool.run({})).rejects.toThrow(\n    /Terminating worker thread/\n  )\n\n  setImmediate(() => void pool.destroy())\n  await promise\n})\n\ntest('cleans up async resources', async () => {\n  let onCleanup = () => {}\n  const waitForCleanup = new Promise<void>((r) => (onCleanup = r))\n  const timeout = setTimeout(() => {\n    throw new Error('Timeout waiting for async resource destroying')\n  }, 2_000).unref()\n\n  const ids = new Set<number>()\n\n  const hook = createHook({\n    init(asyncId, type) {\n      if (type === 'Tinypool') {\n        ids.add(asyncId)\n      }\n    },\n    destroy(asyncId) {\n      if (ids.has(asyncId)) {\n        ids.delete(asyncId)\n        onCleanup()\n        clearTimeout(timeout)\n      }\n    },\n  })\n  hook.enable()\n\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    maxThreads: 1,\n    minThreads: 1,\n  })\n\n  await pool.run('42')\n\n  expect(ids.size).toBe(1)\n\n  await pool.destroy()\n  await waitForCleanup\n\n  expect(ids.size).toBe(0)\n  hook.disable()\n})\n"
  },
  {
    "path": "test/resource-limits.test.ts",
    "content": "import { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\ntest('resourceLimits causes task to reject', async () => {\n  const worker = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/resource-limits.js'),\n    resourceLimits: {\n      maxOldGenerationSizeMb: 4,\n      maxYoungGenerationSizeMb: 2,\n      codeRangeSizeMb: 4,\n    },\n  })\n  worker.on('error', () => {\n    // Ignore any additional errors that may occur.\n    // This may happen because when the Worker is\n    // killed a new worker is created that may hit\n    // the memory limits immediately. When that\n    // happens, there is no associated Promise to\n    // reject so we emit an error event instead.\n    // We don't care so much about that here. We\n    // could potentially avoid the issue by setting\n    // higher limits above but rather than try to\n    // guess at limits that may work consistently,\n    // let's just ignore the additional error for\n    // now.\n  })\n  const limits: any = worker.options.resourceLimits\n  expect(limits.maxOldGenerationSizeMb).toBe(4)\n  expect(limits.maxYoungGenerationSizeMb).toBe(2)\n  expect(limits.codeRangeSizeMb).toBe(4)\n  await expect(worker.run(null)).rejects.toThrow(\n    /Worker terminated due to reaching memory limit: JS heap out of memory/\n  )\n})\n\ndescribe.each(['worker_threads', 'child_process'] as const)('%s', (runtime) => {\n  test('worker is recycled after reaching maxMemoryLimitBeforeRecycle', async () => {\n    const pool = new Tinypool({\n      filename: resolve(__dirname, 'fixtures/leak-memory.js'),\n      maxMemoryLimitBeforeRecycle: 10_000_000,\n      isolateWorkers: false,\n      minThreads: 1,\n      maxThreads: 1,\n      runtime,\n    })\n\n    const originalWorkerId = pool.threads[0]?.threadId\n    expect(originalWorkerId).toBeGreaterThan(0)\n\n    let finalThreadId = 
originalWorkerId\n    let rounds = 0\n\n    // This is just an estimate of how to leak \"some\" memory - it's not accurate.\n    // Running 100 loops should be enough to make the worker reach memory limit and be recycled.\n    // Use the `rounds` to make sure we don't reach the limit on the first round.\n    for (const _ of Array(100).fill(0)) {\n      await pool.run(10_000)\n\n      if (pool.threads[0]) {\n        finalThreadId = pool.threads[0].threadId\n      }\n\n      if (finalThreadId !== originalWorkerId) {\n        break\n      }\n\n      rounds++\n    }\n\n    // Test setup should not reach max memory on first round\n    expect(rounds).toBeGreaterThan(1)\n\n    // Thread should have been recycled\n    expect(finalThreadId).not.toBe(originalWorkerId)\n  })\n\n  test('recycled workers should not crash pool (regression)', async () => {\n    const pool = new Tinypool({\n      filename: resolve(__dirname, 'fixtures/leak-memory.js'),\n      maxMemoryLimitBeforeRecycle: 10,\n      isolateWorkers: false,\n      minThreads: 2,\n      maxThreads: 2,\n      runtime,\n    })\n\n    // This should not crash the pool\n    await Promise.all(\n      Array(10)\n        .fill(0)\n        .map(() => pool.run(10_000))\n    )\n  })\n})\n"
  },
  {
    "path": "test/runtime.test.ts",
    "content": "import EventEmitter from 'node:events'\nimport * as path from 'node:path'\nimport { fileURLToPath } from 'node:url'\nimport { Tinypool } from 'tinypool'\n\nconst __dirname = path.dirname(fileURLToPath(import.meta.url))\n\ndescribe('worker_threads', () => {\n  test('runs code in worker_threads', async () => {\n    const pool = createPool({ runtime: 'worker_threads' })\n\n    const result = await pool.run(`\n      (async () => {\n        const workerThreads = await import(\"worker_threads\");\n\n        return {\n          sum: 11 + 12,\n          isMainThread: workerThreads.isMainThread,\n          pid: process.pid,\n        }\n      })()\n    `)\n    expect(result.sum).toBe(23)\n    expect(result.isMainThread).toBe(false)\n    expect(result.pid).toBe(process.pid)\n  })\n\n  test('sets tinypool state', async () => {\n    const pool = createPool({ runtime: 'worker_threads' })\n\n    const result = await pool.run('process.__tinypool_state__')\n    expect(result.isTinypoolWorker).toBe(true)\n    expect(result.isWorkerThread).toBe(true)\n    expect(result.isChildProcess).toBe(undefined)\n  })\n\n  test(\"worker's threadId is used as threadId\", async () => {\n    const pool = createPool({ runtime: 'worker_threads' })\n    const threadId = pool.threads[0]!.threadId\n\n    const result = await pool.run(`\n      (async () => {\n        const workerThreads = await import(\"worker_threads\");\n        return workerThreads.threadId;\n      })()\n    `)\n    expect(result).toBe(threadId)\n  })\n\n  test('channel is closed when isolated', async () => {\n    const pool = createPool({\n      runtime: 'worker_threads',\n      isolateWorkers: true,\n      minThreads: 2,\n      maxThreads: 2,\n    })\n\n    const events: string[] = []\n\n    await pool.run('', { channel: { onClose: () => events.push('call #1') } })\n    expect(events).toStrictEqual(['call #1'])\n\n    await pool.run('', { channel: { onClose: () => events.push('call #2') } })\n    
expect(events).toStrictEqual(['call #1', 'call #2'])\n\n    await pool.run('', { channel: { onClose: () => events.push('call #3') } })\n    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])\n\n    await pool.destroy()\n    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])\n  })\n\n  test('channel is closed when non-isolated', async () => {\n    const pool = createPool({\n      runtime: 'worker_threads',\n      isolateWorkers: false,\n      minThreads: 2,\n      maxThreads: 2,\n    })\n\n    const events: string[] = []\n\n    await pool.run('', { channel: { onClose: () => events.push('call #1') } })\n    expect(events).toStrictEqual([])\n\n    await pool.run('', { channel: { onClose: () => events.push('call #2') } })\n    expect(events).toStrictEqual(['call #1'])\n\n    await pool.run('', { channel: { onClose: () => events.push('call #3') } })\n    expect(events).toStrictEqual(['call #1', 'call #2'])\n\n    await pool.destroy()\n    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])\n  })\n})\n\ndescribe('child_process', () => {\n  test('runs code in child_process', async () => {\n    const pool = createPool({ runtime: 'child_process' })\n\n    const result = await pool.run(`\n    (async () => {\n      const workerThreads = await import(\"worker_threads\");\n\n      return {\n        sum: 11 + 12,\n        isMainThread: workerThreads.isMainThread,\n        pid: process.pid,\n      }\n    })()\n  `)\n    expect(result.sum).toBe(23)\n    expect(result.isMainThread).toBe(true)\n    expect(result.pid).not.toBe(process.pid)\n  })\n\n  test('sets tinypool state', async () => {\n    const pool = createPool({ runtime: 'child_process' })\n\n    const result = await pool.run('process.__tinypool_state__')\n    expect(result.isTinypoolWorker).toBe(true)\n    expect(result.isChildProcess).toBe(true)\n    expect(result.isWorkerThread).toBe(undefined)\n  })\n\n  test(\"sub-process's process ID is used as threadId\", async () => {\n    const 
pool = createPool({ runtime: 'child_process' })\n    const threadId = pool.threads[0]!.threadId\n\n    const result = await pool.run('process.pid')\n    expect(result).toBe(threadId)\n  })\n\n  test('child process workerId should be internal tinypool workerId', async () => {\n    const pool = createPool({ runtime: 'child_process' })\n    const workerId = await pool.run('process.__tinypool_state__.workerId')\n    expect(workerId).toBe(1)\n  })\n\n  test('errors are serialized', async () => {\n    const pool = createPool({ runtime: 'child_process' })\n\n    const error = await pool\n      .run(\"throw new TypeError('Test message');\")\n      .catch((e) => e)\n\n    expect(error.name).toBe('TypeError')\n    expect(error.message).toBe('Test message')\n    expect(error.stack).toMatch('fixtures/eval.js')\n  })\n\n  test('can send messages to port', async () => {\n    const pool = createPool({\n      runtime: 'child_process',\n      filename: path.resolve(\n        __dirname,\n        'fixtures/child_process-communication.mjs'\n      ),\n    })\n\n    const emitter = new EventEmitter()\n\n    const startup = new Promise<void>((resolve) =>\n      emitter.on(\n        'response',\n        (message) => message === 'Child process started' && resolve()\n      )\n    )\n\n    const runPromise = pool.run('default', {\n      channel: {\n        onMessage: (callback) => emitter.on('message', callback),\n        postMessage: (message) => emitter.emit('response', message),\n      },\n    })\n\n    // Wait for the child process to start\n    await startup\n\n    const response = new Promise<any>((resolve) =>\n      emitter.on(\n        'response',\n        (message) => message !== 'Hello from main' && resolve(message)\n      )\n    )\n\n    // Send message to child process\n    emitter.emit('message', 'Hello from main')\n\n    // Wait for task to finish\n    await runPromise\n\n    // Wait for response from child\n    const result = await response\n\n    
expect(result).toMatchObject({\n      received: 'Hello from main',\n      response: 'Hello from worker',\n    })\n  })\n\n  test('can send complex messages to port', async () => {\n    const pool = createPool({\n      runtime: 'child_process',\n      filename: path.resolve(\n        __dirname,\n        'fixtures/child_process-communication.mjs'\n      ),\n      serialization: 'advanced',\n    })\n\n    const complexData = {\n      bigint: 123456789123456789n,\n      map: new Map([['hello', 'world']]),\n      set: new Set(['hello', 'world']),\n      error: new Error('message'),\n      regexp: /regexp/,\n    }\n\n    const emitter = new EventEmitter()\n\n    const startup = new Promise<void>((resolve) =>\n      emitter.on(\n        'response',\n        (message) => message === 'Child process started' && resolve()\n      )\n    )\n\n    const runPromise = pool.run(complexData, {\n      channel: {\n        onMessage: (callback) => emitter.on('message', callback),\n        postMessage: (message) => emitter.emit('response', message),\n      },\n    })\n\n    // Wait for the child process to start\n    await startup\n\n    const response = new Promise<any>((resolve) =>\n      emitter.on('response', (message) => resolve(message))\n    )\n\n    // Send message to child process\n    emitter.emit('message', complexData)\n\n    // Wait for task to finish\n    const runResult = await runPromise\n\n    expect(runResult).toMatchObject({\n      received: complexData,\n      response: 'Hello from worker',\n    })\n\n    // Wait for response from child\n    const channelResult = await response\n\n    expect(channelResult).toMatchObject({\n      received: complexData,\n      response: 'Hello from worker',\n    })\n  })\n\n  test('channel is closed when isolated', async () => {\n    const pool = createPool({\n      runtime: 'child_process',\n      isolateWorkers: true,\n      minThreads: 2,\n      maxThreads: 2,\n    })\n\n    const events: string[] = []\n\n    await pool.run('', { 
channel: { onClose: () => events.push('call #1') } })\n    expect(events).toStrictEqual(['call #1'])\n\n    await pool.run('', { channel: { onClose: () => events.push('call #2') } })\n    expect(events).toStrictEqual(['call #1', 'call #2'])\n\n    await pool.run('', { channel: { onClose: () => events.push('call #3') } })\n    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])\n\n    await pool.destroy()\n    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])\n  })\n\n  test('channel is closed when non-isolated', async () => {\n    const pool = createPool({\n      runtime: 'child_process',\n      isolateWorkers: false,\n      minThreads: 2,\n      maxThreads: 2,\n    })\n\n    const events: string[] = []\n\n    await pool.run('', { channel: { onClose: () => events.push('call #1') } })\n    expect(events).toStrictEqual([])\n\n    await pool.run('', { channel: { onClose: () => events.push('call #2') } })\n    expect(events).toStrictEqual(['call #1'])\n\n    await pool.run('', { channel: { onClose: () => events.push('call #3') } })\n    expect(events).toStrictEqual(['call #1', 'call #2'])\n\n    await pool.destroy()\n    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])\n  })\n})\n\ntest('runtime can be changed after recycle', async () => {\n  const pool = createPool({ runtime: 'worker_threads' })\n  const getState = 'process.__tinypool_state__'\n\n  await expect(\n    Promise.all([pool.run(getState), pool.run(getState)])\n  ).resolves.toMatchObject([{ isWorkerThread: true }, { isWorkerThread: true }])\n\n  await pool.recycleWorkers({ runtime: 'child_process' })\n\n  await expect(\n    Promise.all([pool.run(getState), pool.run(getState)])\n  ).resolves.toMatchObject([{ isChildProcess: true }, { isChildProcess: true }])\n\n  await pool.recycleWorkers({ runtime: 'worker_threads' })\n\n  expect(await pool.run(getState)).toMatchObject({\n    isWorkerThread: true,\n  })\n})\n\ntest('isolated idle workers change runtime after recycle', 
async () => {\n  const pool = createPool({\n    runtime: 'worker_threads',\n    minThreads: 2,\n    maxThreads: 2,\n    isolateWorkers: true,\n  })\n  const getState = 'process.__tinypool_state__'\n\n  await expect(pool.run(getState)).resolves.toMatchObject({\n    isWorkerThread: true,\n  })\n\n  await pool.recycleWorkers({ runtime: 'child_process' })\n\n  await expect(\n    Promise.all([pool.run(getState), pool.run(getState)])\n  ).resolves.toMatchObject([{ isChildProcess: true }, { isChildProcess: true }])\n})\n\nfunction createPool(options: Partial<Tinypool['options']>) {\n  const pool = new Tinypool({\n    filename: path.resolve(__dirname, 'fixtures/eval.js'),\n    minThreads: 1,\n    maxThreads: 1,\n    ...options,\n  })\n\n  return pool\n}\n"
  },
  {
    "path": "test/simple.test.ts",
    "content": "import EventEmitter from 'node:events'\nimport { cpus } from 'node:os'\nimport { dirname, resolve } from 'node:path'\nimport Tinypool from 'tinypool'\nimport { fileURLToPath, pathToFileURL } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\nconst sleep = async (num: number) =>\n  await new Promise((res) => setTimeout(res, num))\n\ntest('basic test', async () => {\n  const worker = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/simple-isworkerthread.js'),\n  })\n  const result = await worker.run(null)\n  expect(result).toBe('done')\n})\n\ntest('isWorkerThread correct value', async () => {\n  expect(Tinypool.isWorkerThread).toBe(false)\n})\n\ntest('Tinypool instance is an EventEmitter', async () => {\n  const piscina = new Tinypool()\n  expect(piscina instanceof EventEmitter).toBe(true)\n})\n\ntest('Tinypool constructor options are correctly set', async () => {\n  const piscina = new Tinypool({\n    minThreads: 10,\n    maxThreads: 20,\n    maxQueue: 30,\n  })\n\n  expect(piscina.options.minThreads).toBe(10)\n  expect(piscina.options.maxThreads).toBe(20)\n  expect(piscina.options.maxQueue).toBe(30)\n})\n//\ntest('trivial eval() handler works', async () => {\n  const worker = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n  const result = await worker.run('42')\n  expect(result).toBe(42)\n})\n\ntest('async eval() handler works', async () => {\n  const worker = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n  const result = await worker.run('Promise.resolve(42)')\n  expect(result).toBe(42)\n})\n\ntest('filename can be provided while posting', async () => {\n  const worker = new Tinypool()\n  const result = await worker.run('Promise.resolve(42)', {\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n  expect(result).toBe(42)\n})\n\ntest('filename can be null when initially provided', async () => {\n  const worker = new Tinypool({ filename: null 
})\n  const result = await worker.run('Promise.resolve(42)', {\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n  expect(result).toBe(42)\n})\n\ntest('filename must be provided while posting', async () => {\n  const worker = new Tinypool()\n  await expect(worker.run('doesn’t matter')).rejects.toThrow(\n    /filename must be provided to run\\(\\) or in options object/\n  )\n})\n\ntest('passing env to workers works', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    env: { A: 'foo' },\n  })\n\n  const env = await pool.run('({...process.env})')\n  expect(env).toEqual({ A: 'foo' })\n})\n\ntest('passing argv to workers works', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    argv: ['a', 'b', 'c'],\n  })\n\n  const env = await pool.run('process.argv.slice(2)')\n  expect(env).toEqual(['a', 'b', 'c'])\n})\n\ntest('passing argv to child process', async () => {\n  const pool = new Tinypool({\n    runtime: 'child_process',\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    argv: ['a', 'b', 'c'],\n  })\n\n  const env = await pool.run('process.argv.slice(2)')\n  expect(env).toEqual(['a', 'b', 'c'])\n})\n\ntest('passing execArgv to workers works', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    execArgv: ['--no-warnings'],\n  })\n\n  const env = await pool.run('process.execArgv')\n  expect(env).toEqual(['--no-warnings'])\n})\n\ntest('passing valid workerData works', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/simple-workerdata.js'),\n    workerData: 'ABC',\n  })\n  expect(Tinypool.workerData).toBe(undefined)\n\n  await pool.run(null)\n})\n\ntest('filename can be a file:// URL', async () => {\n  const worker = new Tinypool({\n    filename: pathToFileURL(resolve(__dirname, 'fixtures/eval.js')).href,\n  })\n  const result = await worker.run('42')\n  
expect(result).toBe(42)\n})\n\ntest('filename can be a file:// URL to an ESM module', async () => {\n  const worker = new Tinypool({\n    filename: pathToFileURL(resolve(__dirname, 'fixtures/esm-export.mjs')).href,\n  })\n  const result = await worker.run('42')\n  expect(result).toBe(42)\n})\n\ntest('named tasks work', async () => {\n  const worker = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/multiple.js'),\n  })\n\n  expect(await worker.run({}, { name: 'a' })).toBe('a')\n  expect(await worker.run({}, { name: 'b' })).toBe('b')\n  expect(await worker.run({})).toBe('a')\n})\n\ntest('named tasks work', async () => {\n  const worker = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/multiple.js'),\n    name: 'b',\n  })\n\n  expect(await worker.run({}, { name: 'a' })).toBe('a')\n  expect(await worker.run({}, { name: 'b' })).toBe('b')\n  expect(await worker.run({})).toBe('b')\n})\n\ntest('isolateWorkers: false', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/isolated.js'),\n    isolateWorkers: false,\n  })\n\n  expect(await pool.run({})).toBe(0)\n  expect(await pool.run({})).toBe(1)\n  expect(await pool.run({})).toBe(2)\n})\n\ntest('isolateWorkers: true', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/isolated.js'),\n    isolateWorkers: true,\n  })\n\n  expect(await pool.run({})).toBe(0)\n  expect(await pool.run({})).toBe(0)\n  expect(await pool.run({})).toBe(0)\n})\n\ntest('workerId should never be more than maxThreads=1', async () => {\n  const maxThreads = 1\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/workerId.js'),\n    isolateWorkers: true,\n    maxThreads: maxThreads,\n  })\n  await pool.destroy()\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await 
expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n\n  await sleep(300)\n})\n\ntest('workerId should never be more than maxThreads', async () => {\n  const maxThreads = Math.floor(Math.random() * (4 - 1 + 1) + 1)\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/workerId.js'),\n    isolateWorkers: true,\n    maxThreads: maxThreads,\n  })\n  await pool.destroy()\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)\n\n  await sleep(300)\n})\n\ntest('worker count should never be below minThreads when using isolateWorkers', async () => {\n  const minThreads = 4\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/workerId.js'),\n    isolateWorkers: true,\n    minThreads,\n  })\n  await pool.run({})\n  expect(pool.threads.length).toBe(minThreads)\n  await pool.run({})\n  expect(pool.threads.length).toBe(minThreads)\n  await pool.run({})\n  expect(pool.threads.length).toBe(minThreads)\n  await pool.run({})\n  expect(pool.threads.length).toBe(minThreads)\n  await pool.run({})\n  expect(pool.threads.length).toBe(minThreads)\n  await pool.run({})\n  expect(pool.threads.length).toBe(minThreads)\n\n  await sleep(300)\n})\n\ntest('workerId should never be duplicated', async () => {\n  const maxThreads = cpus().length + 4\n  // console.log('maxThreads', maxThreads)\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 
'fixtures/workerId.js'),\n    isolateWorkers: true,\n    // challenge tinypool\n    maxThreads,\n  })\n  let duplicated = false\n  const workerIds: number[] = []\n\n  function addWorkerId(workerId: number) {\n    if (workerIds.includes(workerId)) {\n      duplicated = true\n      // console.log('fucked')\n    }\n    workerIds.push(workerId)\n  }\n\n  const createWorkerId = async (): Promise<number> => {\n    const result = await pool.run({})\n    addWorkerId(result)\n    return result\n  }\n\n  for (let i = 0; i < 20; i++) {\n    if (duplicated) {\n      continue\n    }\n    await Promise.all(\n      new Array(maxThreads - 2).fill(0).map(() => createWorkerId())\n    )\n    workerIds.length = 0\n\n    expect(duplicated).toBe(false)\n  }\n\n  await pool.destroy()\n  await sleep(3000)\n}, 30000)\n\ntest('isolateWorkers: true with minThreads of 0 should not halt (#42)', async () => {\n  const minThreads = 0,\n    maxThreads = 6\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/isolated.js'),\n    minThreads,\n    maxThreads,\n    isolateWorkers: true,\n  })\n  // https://github.com/tinylibs/tinypool/pull/44#discussion_r1070169279\n  const promises = []\n  for (let i = 0; i < maxThreads + 1; i++) {\n    promises.push(pool.run({}))\n  }\n  await Promise.all(promises)\n})\n"
  },
  {
    "path": "test/task-queue.test.ts",
    "content": "import { dirname, resolve } from 'node:path'\nimport { Tinypool, type Task, type TaskQueue } from 'tinypool'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\ntest('will put items into a task queue until they can run', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/wait-for-notify.js'),\n    minThreads: 2,\n    maxThreads: 3,\n  })\n  expect(pool.threads.length).toBe(2)\n  expect(pool.queueSize).toBe(0)\n\n  const buffers = [\n    new Int32Array(new SharedArrayBuffer(4)),\n    new Int32Array(new SharedArrayBuffer(4)),\n    new Int32Array(new SharedArrayBuffer(4)),\n    new Int32Array(new SharedArrayBuffer(4)),\n  ]\n\n  const results = []\n\n  results.push(pool.run(buffers[0]))\n  expect(pool.threads.length).toBe(2)\n  expect(pool.queueSize).toBe(0)\n\n  results.push(pool.run(buffers[1]))\n  expect(pool.threads.length).toBe(2)\n  expect(pool.queueSize).toBe(0)\n\n  results.push(pool.run(buffers[2]))\n  expect(pool.threads.length).toBe(3)\n  expect(pool.queueSize).toBe(0)\n\n  results.push(pool.run(buffers[3]))\n  expect(pool.threads.length).toBe(3)\n  expect(pool.queueSize).toBe(1)\n\n  for (const buffer of buffers) {\n    Atomics.store(buffer, 0, 1)\n    Atomics.notify(buffer, 0, 1)\n  }\n\n  await results[0]\n  expect(pool.queueSize).toBe(0)\n\n  await Promise.all(results)\n})\n\ntest('will reject items over task queue limit', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    minThreads: 0,\n    maxThreads: 1,\n    maxQueue: 2,\n  })\n  const promises: Promise<void>[] = []\n\n  expect(pool.threads.length).toBe(0)\n  expect(pool.queueSize).toBe(0)\n\n  promises.push(\n    expect(pool.run('while (true) {}')).rejects.toThrow(\n      /Terminating worker thread/\n    )\n  )\n\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n\n  promises.push(\n    expect(pool.run('while (true) 
{}')).rejects.toThrow(\n      /Terminating worker thread/\n    )\n  )\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(1)\n\n  promises.push(\n    expect(pool.run('while (true) {}')).rejects.toThrow(\n      /Terminating worker thread/\n    )\n  )\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(2)\n\n  promises.push(\n    expect(pool.run('while (true) {}')).rejects.toThrow(\n      /Task queue is at limit/\n    )\n  )\n\n  await pool.destroy()\n  await Promise.all(promises)\n})\n\ntest('will reject items when task queue is unavailable', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    minThreads: 0,\n    maxThreads: 1,\n    maxQueue: 0,\n  })\n  const promises: Promise<void>[] = []\n\n  expect(pool.threads.length).toBe(0)\n  expect(pool.queueSize).toBe(0)\n\n  promises.push(\n    expect(pool.run('while (true) {}')).rejects.toThrow(\n      /Terminating worker thread/\n    )\n  )\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n\n  promises.push(\n    expect(pool.run('while (true) {}')).rejects.toThrow(\n      /No task queue available and all Workers are busy/\n    )\n  )\n\n  await pool.destroy()\n  await Promise.all(promises)\n})\n\ntest('will reject items when task queue is unavailable (fixed thread count)', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    minThreads: 1,\n    maxThreads: 1,\n    maxQueue: 0,\n  })\n  const promises: Promise<void>[] = []\n\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n\n  promises.push(\n    expect(pool.run('while (true) {}')).rejects.toThrow(\n      /Terminating worker thread/\n    )\n  )\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n\n  promises.push(\n    expect(pool.run('while (true) {}')).rejects.toThrow(\n      /No task queue available and all Workers are busy/\n    )\n  )\n\n  await 
pool.destroy()\n  await Promise.all(promises)\n})\n\ntest('tasks can share a Worker if requested (both tests blocking)', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/wait-for-notify.js'),\n    minThreads: 0,\n    maxThreads: 1,\n    maxQueue: 0,\n    concurrentTasksPerWorker: 2,\n  })\n  const promises: Promise<void>[] = []\n\n  expect(pool.threads.length).toBe(0)\n  expect(pool.queueSize).toBe(0)\n\n  promises.push(\n    expect(\n      pool.run(new Int32Array(new SharedArrayBuffer(4)))\n    ).rejects.toBeTruthy()\n  )\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n\n  promises.push(\n    expect(\n      pool.run(new Int32Array(new SharedArrayBuffer(4)))\n    ).rejects.toBeTruthy()\n  )\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n\n  await pool.destroy()\n  await Promise.all(promises)\n})\n\ntest('tasks can share a Worker if requested (both tests finish)', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/wait-for-notify.js'),\n    minThreads: 1,\n    maxThreads: 1,\n    maxQueue: 0,\n    concurrentTasksPerWorker: 2,\n  })\n\n  const buffers = [\n    new Int32Array(new SharedArrayBuffer(4)),\n    new Int32Array(new SharedArrayBuffer(4)),\n  ] as const\n\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n\n  const firstTask = pool.run(buffers[0])\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n\n  const secondTask = pool.run(buffers[1])\n  expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n\n  Atomics.store(buffers[0] as any, 0, 1)\n  Atomics.store(buffers[1] as any, 0, 1)\n  Atomics.notify(buffers[0] as any, 0, 1)\n  Atomics.notify(buffers[1] as any, 0, 1)\n  Atomics.wait(buffers[0] as any, 0, 1)\n  Atomics.wait(buffers[1] as any, 0, 1)\n\n  await firstTask\n  expect(buffers[0][0]).toBe(-1)\n  await secondTask\n  expect(buffers[1][0]).toBe(-1)\n\n  
expect(pool.threads.length).toBe(1)\n  expect(pool.queueSize).toBe(0)\n})\n\ntest('custom task queue works', async () => {\n  let sizeCalled: boolean = false\n  let shiftCalled: boolean = false\n  let pushCalled: boolean = false\n\n  class CustomTaskPool implements TaskQueue {\n    tasks: Task[] = []\n\n    get size(): number {\n      sizeCalled = true\n      return this.tasks.length\n    }\n\n    shift(): Task | null {\n      shiftCalled = true\n      return this.tasks.length > 0 ? (this.tasks.shift() as Task) : null\n    }\n\n    push(task: Task): void {\n      pushCalled = true\n      this.tasks.push(task)\n\n      expect(Tinypool.queueOptionsSymbol in task).toBeTruthy()\n      if ((task as any).task.a === 3) {\n        // @ts-expect-error -- intentional\n        expect(task[Tinypool.queueOptionsSymbol]).toBeNull()\n      } else {\n        // @ts-expect-error -- intentional\n        expect(task[Tinypool.queueOptionsSymbol].option).toEqual(\n          (task as any).task.a\n        )\n      }\n    }\n\n    remove(task: Task): void {\n      const index = this.tasks.indexOf(task)\n      this.tasks.splice(index, 1)\n    }\n\n    cancel() {}\n  }\n\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    taskQueue: new CustomTaskPool(),\n    // Setting maxThreads low enough to ensure we queue\n    maxThreads: 1,\n    minThreads: 1,\n  })\n\n  function makeTask(task: any, option: any) {\n    return { ...task, [Tinypool.queueOptionsSymbol]: { option } }\n  }\n\n  const ret = await Promise.all([\n    pool.run(makeTask({ a: 1 }, 1)),\n    pool.run(makeTask({ a: 2 }, 2)),\n    pool.run({ a: 3 }), // No queueOptionsSymbol attached\n  ])\n\n  expect(ret[0].a).toBe(1)\n  expect(ret[1].a).toBe(2)\n  expect(ret[2].a).toBe(3)\n\n  expect(sizeCalled).toBeTruthy()\n  expect(pushCalled).toBeTruthy()\n  expect(shiftCalled).toBeTruthy()\n})\n\ntest('queued tasks can be cancelled', async () => {\n  const pool = new Tinypool({\n    filename: 
resolve(__dirname, 'fixtures/sleep.js'),\n    minThreads: 0,\n    maxThreads: 1,\n  })\n\n  const time = 2000\n  const taskCount = 10\n\n  const promises = []\n  let finishedTasks = 0\n  let cancelledTasks = 0\n\n  for (const _ of Array(taskCount)) {\n    const promise = pool\n      .run({ time })\n      .then(() => {\n        finishedTasks++\n      })\n      .catch((error) => {\n        if (error.message !== 'The task has been cancelled') {\n          throw error\n        }\n        cancelledTasks++\n      })\n    promises.push(promise)\n  }\n\n  // Wait for the first task to start\n  await new Promise((resolve) => setTimeout(resolve, time / 2))\n  expect(pool.queueSize).toBe(taskCount - 1)\n\n  // One task is running, cancel the pending ones\n  pool.cancelPendingTasks()\n\n  // The first task should still be on-going, pending ones should have started their cancellation\n  expect(finishedTasks).toBe(0)\n  expect(pool.queueSize).toBe(0)\n\n  await Promise.all(promises)\n\n  expect({ finishedTasks, cancelledTasks }).toEqual({\n    finishedTasks: 1,\n    cancelledTasks: taskCount - 1,\n  })\n})\n"
  },
  {
    "path": "test/teardown.test.ts",
    "content": "import { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url'\nimport { MessageChannel } from 'node:worker_threads'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\ntest('isolated workers call teardown on worker recycle', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/teardown.mjs'),\n    minThreads: 1,\n    maxThreads: 1,\n    isolateWorkers: true,\n    teardown: 'namedTeardown',\n  })\n\n  for (const _ of [1, 2, 3, 4, 5]) {\n    const { port1, port2 } = new MessageChannel()\n    const promise = new Promise((resolve) => port2.on('message', resolve))\n\n    const output = await pool.run({ port: port1 }, { transferList: [port1] })\n    expect(output).toBe('Output of task #1')\n\n    await expect(promise).resolves.toBe('Teardown of task #1')\n  }\n})\n\ntest('non-isolated workers call teardown on worker recycle', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/teardown.mjs'),\n    minThreads: 1,\n    maxThreads: 1,\n    isolateWorkers: false,\n    teardown: 'namedTeardown',\n  })\n\n  function unexpectedTeardown(message: string) {\n    assert.fail(\n      `Teardown should not have been called yet. Received \"${message}\"`\n    )\n  }\n\n  const { port1, port2 } = new MessageChannel()\n\n  for (const index of [1, 2, 3, 4, 5]) {\n    port2.on('message', unexpectedTeardown)\n\n    const transferList = index === 1 ? [port1] : []\n\n    const output = await pool.run({ port: transferList[0] }, { transferList })\n    expect(output).toBe(`Output of task #${index}`)\n  }\n\n  port2.off('message', unexpectedTeardown)\n  const promise = new Promise((resolve) => port2.on('message', resolve))\n\n  await pool.destroy()\n  await expect(promise).resolves.toMatchInlineSnapshot(`\"Teardown of task #5\"`)\n})\n"
  },
  {
    "path": "test/termination.test.ts",
    "content": "import { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\nconst cleanups: (() => Promise<unknown>)[] = []\n\nafterEach(async () => {\n  await Promise.all(cleanups.splice(0).map((cleanup) => cleanup()))\n})\n\ntest('termination timeout throws when worker does not terminate in time', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/sleep.js'),\n    terminateTimeout: 10,\n    minThreads: 1,\n    maxThreads: 2,\n    isolateWorkers: true,\n  })\n\n  expect(pool.threads.length).toBe(1)\n\n  const worker = pool.threads[0]\n  expect(worker).toBeTruthy()\n\n  cleanups.push(worker!.terminate.bind(worker))\n  worker!.terminate = () => new Promise(() => {})\n\n  await expect(pool.run('default')).rejects.toThrowError(\n    'Failed to terminate worker'\n  )\n})\n\ntest('writing to terminating worker does not crash', async () => {\n  const listeners: ((msg: any) => void)[] = []\n\n  const pool = new Tinypool({\n    runtime: 'child_process',\n    filename: resolve(__dirname, 'fixtures/sleep.js'),\n    minThreads: 1,\n    maxThreads: 1,\n  })\n\n  await pool.run(\n    {},\n    {\n      channel: {\n        onMessage: (listener) => listeners.push(listener),\n        postMessage: () => {},\n      },\n    }\n  )\n\n  const destroyed = pool.destroy()\n  listeners.forEach((listener) => listener('Hello from main thread'))\n\n  await destroyed\n})\n\ntest('recycling workers while closing pool does not crash', async () => {\n  const pool = new Tinypool({\n    runtime: 'child_process',\n    filename: resolve(__dirname, 'fixtures/nested-pool.mjs'),\n    isolateWorkers: true,\n    minThreads: 1,\n    maxThreads: 1,\n  })\n\n  await Promise.all(\n    (Array(10) as (() => Promise<any>)[])\n      .fill(() => pool.run({}))\n      .map((fn) => fn())\n  )\n\n  await pool.destroy()\n})\n"
  },
  {
    "path": "test/uncaught-exception-from-handler.test.ts",
    "content": "import { dirname, resolve, sep } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url'\nimport { once } from 'node:events'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\ntest('uncaught exception resets Worker', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n\n  await expect(pool.run('throw new Error(\"not_caught\")')).rejects.toThrow(\n    /not_caught/\n  )\n})\n\ntest('uncaught exception in immediate resets Worker', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n\n  await expect(\n    pool.run(`\n    setImmediate(() => { throw new Error(\"not_caught\") });\n    new Promise(() => {}) // act as if we were doing some work\n  `)\n  ).rejects.toThrow(/not_caught/)\n})\n\ntest('uncaught exception in immediate after task yields error event', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n    maxThreads: 1,\n    useAtomics: false,\n  })\n\n  const errorEvent: Promise<Error[]> = once(pool, 'error')\n\n  const taskResult = pool.run(`\n    setTimeout(() => { throw new Error(\"not_caught\") }, 500);\n    42\n  `)\n\n  expect(await taskResult).toBe(42)\n\n  // Hack a bit to make sure we get the 'exit'/'error' events.\n  expect(pool.threads.length).toBe(1)\n  pool.threads[0]!.ref?.()\n\n  // This is the main assertion here.\n  expect((await errorEvent)[0]!.message).toEqual('not_caught')\n})\n\ntest('using parentPort is treated as an error', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n  await expect(\n    pool.run(`\n    (async () => {\n      console.log();\n      const parentPort = (await import('worker_threads')).parentPort;\n      parentPort.postMessage(\"some message\");\n      new Promise(() => {}) /* act as if we were doing some work */\n    })()\n      `)\n  
).rejects.toThrow(/Unexpected message on Worker: 'some message'/)\n})\n\ntest('no named handler found from worker', async () => {\n  const pool = new Tinypool({\n    filename: resolve(__dirname, 'fixtures/eval.js'),\n  })\n\n  let errorMessage = 'Worker did not throw error'\n\n  try {\n    await pool.run('', { name: 'someHandler' })\n  } catch (error) {\n    errorMessage = error instanceof Error ? error.message : String(error)\n  }\n\n  expect(\n    errorMessage.replace(process.cwd(), '<process-cwd>').replaceAll(sep, '/')\n  ).toMatchInlineSnapshot(\n    `\"No handler function \"someHandler\" exported from \"<process-cwd>/test/fixtures/eval.js\"\"`\n  )\n})\n"
  },
  {
    "path": "test/worker-stdio.test.ts",
    "content": "import * as path from 'node:path'\nimport { fileURLToPath } from 'node:url'\nimport { stripVTControlCharacters } from 'node:util'\nimport { Tinypool } from 'tinypool'\n\nconst runtimes = ['worker_threads', 'child_process'] as const\nconst __dirname = path.dirname(fileURLToPath(import.meta.url))\n\ntest.each(runtimes)(\n  \"worker's stdout and stderr are piped to main thread when { runtime: '%s' }\",\n  async (runtime) => {\n    const pool = createPool({\n      runtime,\n      minThreads: 1,\n      maxThreads: 1,\n    })\n\n    const getStdout = captureStandardStream('stdout')\n    const getStderr = captureStandardStream('stderr')\n\n    await pool.run({})\n\n    const stdout = getStdout()\n    const stderr = getStderr()\n\n    expect(stdout).toMatch('Worker message')\n\n    expect(stderr).toMatch('Worker error')\n  }\n)\n\nfunction createPool(options: Partial<Tinypool['options']>) {\n  const pool = new Tinypool({\n    filename: path.resolve(__dirname, 'fixtures/stdio.mjs'),\n    minThreads: 1,\n    maxThreads: 1,\n    ...options,\n  })\n\n  return pool\n}\n\nfunction captureStandardStream(type: 'stdout' | 'stderr') {\n  const spy = vi.fn()\n\n  // eslint-disable-next-line @typescript-eslint/unbound-method\n  const original = process[type].write\n  process[type].write = spy\n\n  return function collect() {\n    process[type].write = original\n    return stripVTControlCharacters(\n      spy.mock.calls.map((call) => call[0]).join('')\n    )\n  }\n}\n"
  },
  {
    "path": "tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"target\": \"ESNext\",\n    \"module\": \"ESNext\",\n    \"strict\": true,\n    \"moduleResolution\": \"Bundler\",\n    \"lib\": [\"ESNext\", \"WebWorker\"],\n    \"noUncheckedIndexedAccess\": true,\n    \"baseUrl\": \".\",\n    \"noUnusedLocals\": true,\n    \"noUnusedParameters\": true,\n    \"noImplicitReturns\": true,\n    \"noFallthroughCasesInSwitch\": true,\n    \"esModuleInterop\": true,\n    \"resolveJsonModule\": true,\n    \"forceConsistentCasingInFileNames\": true,\n    \"types\": [\"vitest/globals\", \"@types/node\"],\n    \"paths\": {\n      \"tinypool\": [\"./dist/index.d.ts\"]\n    }\n  },\n  \"include\": [\"./*.d.ts\", \"src/**/*\", \"test/**/*\"],\n  \"exclude\": [\"node_modules\", \"dist\"]\n}\n"
  },
  {
    "path": "tsdown.config.ts",
    "content": "import { defineConfig } from 'tsdown'\n\nexport default defineConfig({\n  entry: ['src/index.ts', 'src/entry/*.ts'],\n})\n"
  },
  {
    "path": "vitest.config.ts",
    "content": "import { dirname, resolve } from 'node:path'\nimport { defineConfig } from 'vitest/config'\nimport { fileURLToPath } from 'node:url'\n\nconst __dirname = dirname(fileURLToPath(import.meta.url))\n\nexport default defineConfig({\n  resolve: {\n    alias: {\n      tinypool: resolve(__dirname, './dist/index.js'),\n    },\n  },\n  test: {\n    globals: true,\n    isolate: false,\n\n    benchmark: {\n      include: ['**/**.bench.ts'],\n    },\n  },\n})\n"
  }
]