Full Code of Aslemammad/tinypool for AI

main abc247f85cba cached
68 files
138.2 KB
37.5k tokens
193 symbols
1 requests
Download .txt
Repository: Aslemammad/tinypool
Branch: main
Commit: abc247f85cba
Files: 68
Total size: 138.2 KB

Directory structure:
gitextract_uc9pk7z0/

├── .clean-publish
├── .github/
│   ├── FUNDING.yml
│   ├── dependabot.yml
│   └── workflows/
│       ├── benchmark.yml
│       ├── nodejs.yml
│       ├── publish.yml
│       └── release-commits.yml
├── .gitignore
├── .npmignore
├── .prettierrc
├── .taprc
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING
├── LICENSE
├── README.md
├── benchmark/
│   ├── fixtures/
│   │   ├── add-process.mjs
│   │   ├── add-worker.mjs
│   │   └── add.mjs
│   ├── isolate-benchmark.bench.ts
│   └── simple.bench.ts
├── eslint.config.js
├── global.d.ts
├── package.json
├── src/
│   ├── common.ts
│   ├── entry/
│   │   ├── process.ts
│   │   ├── utils.ts
│   │   └── worker.ts
│   ├── index.ts
│   ├── runtime/
│   │   ├── process-worker.ts
│   │   └── thread-worker.ts
│   └── utils.ts
├── test/
│   ├── async-context.test.ts
│   ├── atomic.test.ts
│   ├── fixtures/
│   │   ├── child_process-communication.mjs
│   │   ├── esm-export.mjs
│   │   ├── eval.js
│   │   ├── isolated.js
│   │   ├── leak-memory.js
│   │   ├── move.js
│   │   ├── multiple.js
│   │   ├── nested-pool.mjs
│   │   ├── notify-then-sleep-or.js
│   │   ├── resource-limits.js
│   │   ├── simple-isworkerthread.js
│   │   ├── simple-workerdata.js
│   │   ├── sleep.js
│   │   ├── stdio.mjs
│   │   ├── teardown.mjs
│   │   ├── wait-for-notify.js
│   │   ├── wait-for-others.js
│   │   └── workerId.js
│   ├── globals.test.ts
│   ├── idle-timeout.test.ts
│   ├── isolation.test.ts
│   ├── move.test.ts
│   ├── options.test.ts
│   ├── pool-destroy.test.ts
│   ├── resource-limits.test.ts
│   ├── runtime.test.ts
│   ├── simple.test.ts
│   ├── task-queue.test.ts
│   ├── teardown.test.ts
│   ├── termination.test.ts
│   ├── uncaught-exception-from-handler.test.ts
│   └── worker-stdio.test.ts
├── tsconfig.json
├── tsdown.config.ts
└── vitest.config.ts

================================================
FILE CONTENTS
================================================

================================================
FILE: .clean-publish
================================================
{
    "cleanDocs": true
}


================================================
FILE: .github/FUNDING.yml
================================================
open_collective: aslemammad
github: [aslemammad]


================================================
FILE: .github/dependabot.yml
================================================
version: 2
updates:
  - package-ecosystem: "npm" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "daily"

================================================
FILE: .github/workflows/benchmark.yml
================================================
on: [workflow_dispatch]

name: Benchmark

jobs:
  test:
    name: Test
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        node-version: [20.x, 22.x]

    runs-on: ${{matrix.os}}
    steps:
      - uses: actions/checkout@v2

      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v1
        with:
          node-version: ${{ matrix.node-version }}

      - uses: pnpm/action-setup@v2

      - name: Install Dependencies
        run: pnpm install

      - name: Build
        run: pnpm build

      - name: Benchmark
        run: pnpm bench


================================================
FILE: .github/workflows/nodejs.yml
================================================
name: CI

on:
  push:
    branches:
      - main
  pull_request:
  workflow_dispatch:

jobs:
  test:
    name: Test
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        node-version: [20.x, 22.x]

    runs-on: ${{matrix.os}}
    steps:
      - uses: actions/checkout@v2

      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v1
        with:
          node-version: ${{ matrix.node-version }}

      - uses: pnpm/action-setup@v2

      - name: Install Dependencies
        run: pnpm install

      - name: Build
        run: pnpm build

      - name: Typecheck
        run: pnpm typecheck

      - name: Lint
        run: pnpm lint

      - name: Test
        run: pnpm test


================================================
FILE: .github/workflows/publish.yml
================================================
name: Publish

on:
  workflow_dispatch:
    inputs:
      release-type:
        type: choice
        description: Type of the release
        options:
          - patch
          - minor
          - major

permissions:
  contents: write
  id-token: write

jobs:
  publish:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: pnpm/action-setup@v2

      - uses: actions/setup-node@v4
        with:
          node-version: 22
          registry-url: 'https://registry.npmjs.org'

      # OIDC requires updated npm even when pnpm is used
      - name: Update npm
        run: |
          npm --version
          npm install -g npm@latest
          npm --version

      - name: Install Dependencies
        run: pnpm install

      - name: Build
        run: pnpm build

      - name: Typecheck
        run: pnpm typecheck

      - name: Lint
        run: pnpm lint

      - name: Test
        run: pnpm test

      - name: Configure github-actions git
        run: |
          git config --global user.name 'github-actions'
          git config --global user.email 'github-actions@users.noreply.github.com'

      - name: Bump version
        run: pnpm version ${{ github.event.inputs.release-type }}

      - name: Push release tag
        run: git push origin main --follow-tags

      - name: Publish to npm
        run: pnpm publish


================================================
FILE: .github/workflows/release-commits.yml
================================================
name: Publish Any Commit

on: [push, pull_request]

jobs:
  publish:
    name: Publish commit
    runs-on: ubuntu-latest
    if: github.repository == 'tinylibs/tinypool'

    steps:
      - uses: actions/checkout@v2

      - name: Use Node.js 22.x
        uses: actions/setup-node@v1
        with:
          node-version: 22.x

      - uses: pnpm/action-setup@v2

      - name: Install Dependencies
        run: pnpm install

      - name: Build
        run: pnpm build

      - run: pnpx pkg-pr-new publish --compact


================================================
FILE: .gitignore
================================================
.nyc_output
.vscode
.idea
node_modules
dist
coverage


================================================
FILE: .npmignore
================================================
.github
.nyc_output
package-lock.json
coverage
examples


================================================
FILE: .prettierrc
================================================
{
  "endOfLine": "auto",
  "singleQuote": true,
  "semi": false,
  "trailingComma": "es5"
}


================================================
FILE: .taprc
================================================
check-coverage: false
color: true
coverage: true
coverage-report:
  - html
  - text
jobs: 2
no-browser: true
test-env: TS_NODE_PROJECT=test/tsconfig.json
test-ignore: $.
test-regex: ((\/|^)(tests?|__tests?__)\/.*|\.(tests?|spec)|^\/?tests?)\.([mc]js|ts)$
timeout: 60
ts: true


================================================
FILE: CODE_OF_CONDUCT.md
================================================
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
- Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

- The use of sexualized language or imagery, and sexual attention or
  advances of any kind
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email
  address, without their explicit permission
- Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
jasnell@gmail.com, anna@addaleax.net, or matteo.collina@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.


================================================
FILE: CONTRIBUTING
================================================
# Piscina is an OPEN Open Source Project

## What?

Individuals making significant and valuable contributions are given commit-access to the project to contribute as they see fit. This project is more like an open wiki than a standard guarded open source project.

## Rules

There are a few basic ground-rules for contributors:

1. **No `--force` pushes** on `master` or modifying the Git history in any way after a PR has been merged.
1. **Non-master branches** ought to be used for ongoing work.
1. **External API changes and significant modifications** ought to be subject to an **internal pull-request** to solicit feedback from other contributors.
1. Internal pull-requests to solicit feedback are *encouraged* for any other non-trivial contribution but left to the discretion of the contributor.
1. Contributors should attempt to adhere to the prevailing code-style.
1. 100% code coverage
1. Semantic Versioning is used.

## Releases

Declaring formal releases remains the prerogative of the project maintainer.

## Changes to this arrangement

This document may also be subject to pull-requests or changes by contributors where you believe you have something valuable to add or change.

-----------------------------------------


================================================
FILE: LICENSE
================================================
The MIT License (MIT)

Copyright (c) 2020 James M Snell and the Piscina contributors

Piscina contributors listed at https://github.com/jasnell/piscina#the-team and
in the README file.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: README.md
================================================
# Tinypool - the node.js worker pool 🧵

> Piscina: A fast, efficient Node.js Worker Thread Pool implementation

Tinypool is a fork of piscina. What we try to achieve in this library, is to eliminate some dependencies and features that our target users don't need (currently, our main user will be Vitest). Tinypool's install size (38KB) can then be smaller than Piscina's install size (6MB when Tinypool was created; Piscina has since reduced its size to ~800KB). If you need features like [utilization](https://github.com/piscinajs/piscina#property-utilization-readonly) or OS-specific thread priority setting, [Piscina](https://github.com/piscinajs/piscina) is a better choice for you. We think that Piscina is an amazing library, and we may try to upstream some of the dependencies optimization in this fork.

- ✅ Smaller install size, 38KB
- ✅ Minimal
- ✅ No dependencies
- ✅ Physical cores instead of Logical cores with [physical-cpu-count](https://www.npmjs.com/package/physical-cpu-count)
- ✅ Supports `worker_threads` and `child_process`
- ❌ No utilization
- ❌ No OS-specific thread priority setting

- Written in TypeScript, and ESM support only. For Node.js 18.x and higher.

_In case you need more tiny libraries like tinypool or tinyspy, please consider submitting an [RFC](https://github.com/tinylibs/rfcs)_

## Example

### Using `node:worker_threads`

#### Basic usage

```js
// main.mjs
import Tinypool from 'tinypool'

const pool = new Tinypool({
  filename: new URL('./worker.mjs', import.meta.url).href,
})
const result = await pool.run({ a: 4, b: 6 })
console.log(result) // Prints 10

// Make sure to destroy pool once it's not needed anymore
// This terminates all pool's idle workers
await pool.destroy()
```

```js
// worker.mjs
export default ({ a, b }) => {
  return a + b
}
```

#### Main thread <-> worker thread communication

<details>
  <summary>See code</summary>

```js
// main.mjs
import Tinypool from 'tinypool'
import { MessageChannel } from 'node:worker_threads'

const pool = new Tinypool({
  filename: new URL('./worker.mjs', import.meta.url).href,
})
const { port1, port2 } = new MessageChannel()
const promise = pool.run({ port: port1 }, { transferList: [port1] })

port2.on('message', (message) => console.log('Main thread received:', message))
setTimeout(() => port2.postMessage('Hello from main thread!'), 1000)

await promise

port1.close()
port2.close()
```

```js
// worker.mjs
export default ({ port }) => {
  return new Promise((resolve) => {
    port.on('message', (message) => {
      console.log('Worker received:', message)

      port.postMessage('Hello from worker thread!')
      resolve()
    })
  })
}
```

</details>

### Using `node:child_process`

#### Basic usage

<details>
  <summary>See code</summary>

```js
// main.mjs
import Tinypool from 'tinypool'

const pool = new Tinypool({
  runtime: 'child_process',
  filename: new URL('./worker.mjs', import.meta.url).href,
})
const result = await pool.run({ a: 4, b: 6 })
console.log(result) // Prints 10
```

```js
// worker.mjs
export default ({ a, b }) => {
  return a + b
}
```

</details>

#### Main process <-> worker process communication

<details>
  <summary>See code</summary>

```js
// main.mjs
import Tinypool from 'tinypool'

const pool = new Tinypool({
  runtime: 'child_process',
  filename: new URL('./worker.mjs', import.meta.url).href,
})

const messages = []
const listeners = []
const channel = {
  onMessage: (listener) => listeners.push(listener),
  postMessage: (message) => messages.push(message),
}

const promise = pool.run({}, { channel })

// Send message to worker
setTimeout(
  () => listeners.forEach((listener) => listener('Hello from main process')),
  1000
)

// Wait for task to finish
await promise

console.log(messages)
// [{ received: 'Hello from main process', response: 'Hello from worker' }]
```

```js
// worker.mjs
export default async function run() {
  return new Promise((resolve) => {
    process.on('message', (message) => {
      // Ignore Tinypool's internal messages
      if (message?.__tinypool_worker_message__) return

      process.send({ received: message, response: 'Hello from worker' })
      resolve()
    })
  })
}
```

</details>

## API

We have a similar API to Piscina, so for more information, you can read Piscina's detailed [documentation](https://github.com/piscinajs/piscina#piscina---the-nodejs-worker-pool) and apply the same techniques here.

### Tinypool specific APIs

#### Pool constructor options

- `isolateWorkers`: Disabled by default. Always starts with a fresh worker when running tasks to isolate the environment.
- `terminateTimeout`: Disabled by default. If terminating a worker takes `terminateTimeout` amount of milliseconds to execute, an error is raised.
- `maxMemoryLimitBeforeRecycle`: Disabled by default. When defined, the worker's heap memory usage is compared against this value after task has been finished. If the current memory usage exceeds this limit, worker is terminated and a new one is started to take its place. This option is useful when your tasks leak memory and you don't want to enable `isolateWorkers` option.
- `runtime`: Used to pick worker runtime. Default value is `worker_threads`.
  - `worker_threads`: Runs workers in [`node:worker_threads`](https://nodejs.org/api/worker_threads.html). For `main thread <-> worker thread` communication you can use [`MessagePort`](https://nodejs.org/api/worker_threads.html#class-messageport) in the `pool.run()` method's [`transferList` option](https://nodejs.org/api/worker_threads.html#portpostmessagevalue-transferlist). See [example](#main-thread---worker-thread-communication).
  - `child_process`: Runs workers in [`node:child_process`](https://nodejs.org/api/child_process.html). For `main thread <-> worker process` communication you can use `TinypoolChannel` in the `pool.run()` method's `channel` option. For filtering out the Tinypool's internal messages see `TinypoolWorkerMessage`. See [example](#main-process---worker-process-communication).
- `teardown`: name of the function in the file that should be called before the worker is terminated. Must be a named export.
- `serialization`: Specify the kind of serialization used for the `child_process` runtime. Possible values are `'json'` and `'advanced'`. See Node.js [Advanced serialization](https://nodejs.org/docs/latest/api/child_process.html#advanced-serialization) for more details.

#### Pool methods

- `cancelPendingTasks()`: Gracefully cancels all pending tasks without stopping or interfering with on-going tasks. This method is useful when your tasks may have side effects and should not be terminated forcefully during task execution. If your tasks don't have any side effects you may want to use [`{ signal }`](https://github.com/piscinajs/piscina#cancelable-tasks) option for forcefully terminating all tasks, including the on-going ones, instead.
- `recycleWorkers(options)`: Waits for all current tasks to finish and re-creates all workers. Can be used to force isolation imperatively even when `isolateWorkers` is disabled. Accepts `{ runtime }` option as argument.

#### Exports

- `workerId`: Each worker now has an id (<= `maxThreads`) that can be imported from `tinypool` in the worker itself (or `process.__tinypool_state__.workerId`).

## Authors

| <a href="https://github.com/Aslemammad"> <img width='150' src="https://avatars.githubusercontent.com/u/37929992?v=4" /><br> Mohammad Bagher </a> |
| ------------------------------------------------------------------------------------------------------------------------------------------------ |

## Sponsors

Your sponsorship can make a huge difference in continuing our work in open source!

<p align="center">
  <a href="https://cdn.jsdelivr.net/gh/aslemammad/static/sponsors.svg">
    <img src='https://cdn.jsdelivr.net/gh/aslemammad/static/sponsors.svg'/>
  </a>
</p>

## Credits

[The Vitest team](https://vitest.dev/) for giving me the chance of creating and maintaining this project for Vitest.

[Piscina](https://github.com/piscinajs/piscina), because Tinypool is not more than a friendly fork of piscina.


================================================
FILE: benchmark/fixtures/add-process.mjs
================================================
import add from './add.mjs'

// child_process benchmark fixture: for every payload received over the IPC
// channel, reply with the sum computed by the shared add() workload.
function handleMessage(message) {
  process.send(add(message))
}

process.on('message', handleMessage)


================================================
FILE: benchmark/fixtures/add-worker.mjs
================================================
import { parentPort } from 'node:worker_threads'

import add from './add.mjs'

// worker_threads benchmark fixture: for every payload received from the main
// thread, post back the sum computed by the shared add() workload.
function handleMessage(message) {
  parentPort.postMessage(add(message))
}

parentPort.on('message', handleMessage)


================================================
FILE: benchmark/fixtures/add.mjs
================================================
export default ({ a, b }) => a + b


================================================
FILE: benchmark/isolate-benchmark.bench.ts
================================================
import { bench } from 'vitest'
import { cpus } from 'node:os'
import { Worker } from 'node:worker_threads'
import { fork } from 'node:child_process'
import Tinypool, { type Options } from '../dist/index'

const THREADS = cpus().length - 1
const ROUNDS = THREADS * 10
const ITERATIONS = 100

// Benchmark Tinypool itself with worker isolation enabled, once per runtime.
const runtimes = ['worker_threads', 'child_process'] as Options['runtime'][]

for (const runtime of runtimes) {
  bench(
    `Tinypool { runtime: '${runtime}' }`,
    async () => {
      const pool = new Tinypool({
        runtime,
        filename: './benchmark/fixtures/add.mjs',
        isolateWorkers: true,
        minThreads: THREADS,
        maxThreads: THREADS,
      })

      const tasks = Array.from({ length: ROUNDS }, () =>
        pool.run({ a: 1, b: 2 })
      )
      await Promise.all(tasks)

      await pool.destroy()
    },
    { iterations: ITERATIONS }
  )
}

// Baseline: the same workload implemented directly on Node's own APIs.
const baselines = [
  { name: 'worker_threads', task: workerThreadTask },
  { name: 'child_process', task: childProcessTask },
] as const

for (const { task, name } of baselines) {
  bench(
    `node:${name}`,
    async () => {
      // Work queue consumed concurrently by THREADS runners.
      const queue = Array(ROUNDS).fill(task)

      const runners = Array.from({ length: THREADS }, () => drain())
      await Promise.all(runners)

      // Each runner keeps pulling work until the queue is empty.
      async function drain(): Promise<void> {
        const next = queue.shift()

        if (next) {
          await next()
          return drain()
        }
      }
    },
    { iterations: ITERATIONS }
  )
}

// One unit of baseline work: spawn a worker thread, send it a payload,
// verify the echoed sum, then tear the worker down.
async function workerThreadTask() {
  const worker = new Worker('./benchmark/fixtures/add-worker.mjs')

  const answered = new Promise<void>((resolve, reject) => {
    worker.on('message', (sum) => {
      if (sum === 3) resolve()
      else reject('Not 3')
    })
  })

  worker.postMessage({ a: 1, b: 2 })
  await answered

  await worker.terminate()
}

// One unit of baseline work: fork a child process, send it a payload,
// verify the echoed sum, then kill it and wait for it to exit.
async function childProcessTask() {
  const subprocess = fork('./benchmark/fixtures/add-process.mjs')

  const exited = new Promise((resolve) => subprocess.on('exit', resolve))
  const answered = new Promise<void>((resolve, reject) => {
    subprocess.on('message', (sum) => {
      if (sum === 3) resolve()
      else reject('Not 3')
    })
  })

  subprocess.send({ a: 1, b: 2 })
  await answered

  subprocess.kill()
  await exited
}


================================================
FILE: benchmark/simple.bench.ts
================================================
import { bench } from 'vitest'
import Tinypool from '../dist/index'

bench(
  'simple',
  async () => {
    const pool = new Tinypool({
      filename: './benchmark/fixtures/add.mjs',
    })

    const pending: Promise<void>[] = []

    // Keep submitting work until the pool starts queueing tasks,
    // i.e. until every worker is busy.
    while (pool.queueSize === 0) {
      pending.push(pool.run({ a: 4, b: 6 }))
    }

    await Promise.all(pending)
    await pool.destroy()
  },
  { time: 10_000 }
)


================================================
FILE: eslint.config.js
================================================
import { readFileSync } from 'node:fs'
import eslint from '@eslint/js'
import tseslint from 'typescript-eslint'
import eslintPluginUnicorn from 'eslint-plugin-unicorn'
import eslintPluginPrettierRecommended from 'eslint-plugin-prettier/recommended'

// Read tsconfig so its `include` globs can scope the type-aware rules below.
const tsconfig = JSON.parse(readFileSync('./tsconfig.json', 'utf8'))

export default defineConfig([
  eslint.configs.recommended,
  ...tseslint.configs.recommended,
  // Type-checked rules need type information, so they are limited to the
  // files that tsconfig actually covers.
  ...tseslint.configs.recommendedTypeChecked.map((config) => ({
    ...config,
    files: tsconfig.include,
  })),
  {
    files: tsconfig.include,
    languageOptions: {
      parserOptions: {
        project: true,
        tsconfigRootDir: import.meta.dirname,
      },
    },
  },
  {
    languageOptions: {
      globals: {
        process: 'readonly',
      },
    },
    plugins: { unicorn: eslintPluginUnicorn },
    rules: {
      'unicorn/prefer-node-protocol': 'error',
      '@typescript-eslint/no-unused-vars': [
        'error',
        { varsIgnorePattern: '^_' },
      ],
      '@typescript-eslint/consistent-type-imports': [
        'error',
        {
          prefer: 'type-imports',
          fixStyle: 'inline-type-imports',
          disallowTypeAnnotations: false,
        },
      ],

      // TODO: Nice-to-have rules
      '@typescript-eslint/no-unsafe-argument': 'off',
      '@typescript-eslint/no-unsafe-assignment': 'off',
      '@typescript-eslint/no-explicit-any': 'off',
      '@typescript-eslint/no-unsafe-member-access': 'off',
      '@typescript-eslint/no-unsafe-return': 'off',
      '@typescript-eslint/no-redundant-type-constituents': 'off',
      '@typescript-eslint/no-non-null-asserted-optional-chain': 'off',
      '@typescript-eslint/no-namespace': 'off',
    },
  },
  {
    files: ['**/*.test.ts'],
    rules: {
      '@typescript-eslint/require-await': 'off',
    },
  },
  { ignores: ['dist'] },
  eslintPluginPrettierRecommended,
])

/** @param config {import('eslint').Linter.Config} */
function defineConfig(config) {
  return config
}


================================================
FILE: global.d.ts
================================================
// only for tsdown build, excluded from the final tgz
// Augments NodeJS.Process with the state Tinypool attaches to its workers.
declare namespace NodeJS {
  interface Process {
    __tinypool_state__: {
      // True inside any Tinypool-managed worker
      isTinypoolWorker: boolean
      // Set when the worker runs in the `worker_threads` runtime
      isWorkerThread?: boolean
      // Set when the worker runs in the `child_process` runtime
      isChildProcess?: boolean
      // User-provided workerData passed to the worker at startup
      workerData: any
      // Worker id (<= maxThreads), also exported as `workerId` from tinypool
      workerId: number
    }
  }
}


================================================
FILE: package.json
================================================
{
  "name": "tinypool",
  "type": "module",
  "version": "2.1.0",
  "packageManager": "pnpm@9.0.6",
  "description": "A minimal and tiny Node.js Worker Thread Pool implementation, a fork of piscina, but with fewer features",
  "license": "MIT",
  "homepage": "https://github.com/tinylibs/tinypool#readme",
  "repository": {
    "type": "git",
    "url": "https://github.com/tinylibs/tinypool.git"
  },
  "bugs": {
    "url": "https://github.com/tinylibs/tinypool/issues"
  },
  "keywords": [
    "fast",
    "worker threads",
    "thread pool"
  ],
  "exports": {
    ".": {
      "types": "./dist/index.d.ts",
      "default": "./dist/index.js"
    },
    "./package.json": "./package.json"
  },
  "main": "./dist/index.js",
  "module": "./dist/index.js",
  "types": "./dist/index.d.ts",
  "files": [
    "dist"
  ],
  "engines": {
    "node": "^20.0.0 || >=22.0.0"
  },
  "scripts": {
    "test": "vitest",
    "bench": "vitest bench",
    "dev": "tsdown --watch ./src",
    "build": "tsdown",
    "publish": "clean-publish",
    "lint": "eslint --max-warnings=0",
    "typecheck": "tsc --noEmit"
  },
  "devDependencies": {
    "@types/node": "^20.12.8",
    "clean-publish": "^3.4.4",
    "eslint": "^9.4.0",
    "eslint-config-prettier": "^9.1.0",
    "eslint-plugin-prettier": "^5.1.3",
    "eslint-plugin-unicorn": "^53.0.0",
    "prettier": "^3.3.2",
    "tsdown": "^0.11.3",
    "typescript": "^5.4.5",
    "typescript-eslint": "^7.13.0",
    "vite": "^5.2.11",
    "vitest": "^4.0.1"
  }
}


================================================
FILE: src/common.ts
================================================
import type { MessagePort, TransferListItem } from 'node:worker_threads'
import type { SerializationType } from 'node:child_process'

/**
 * Channel for communicating between the main thread and workers.
 * Used by the `child_process` runtime via `pool.run()`'s `channel` option.
 */
export interface TinypoolChannel {
  /** Registers a callback that receives messages sent to the worker */
  onMessage?: (callback: (message: any) => void) => void

  /** Called with each message the worker sends back */
  postMessage?: (message: any) => void

  /** Called when the channel can be closed */
  onClose?: () => void
}

export interface TinypoolWorker {
  runtime: string
  initialize(options: {
    env?: Record<string, string>
    argv?: string[]
    execArgv?: string[]
    resourceLimits?: any
    workerData: TinypoolData
    trackUnmanagedFds?: boolean
    serialization?: SerializationType
  }): void
  terminate(): Promise<any>
  postMessage(message: any, transferListItem?: TransferListItem[]): void
  setChannel?: (channel: TinypoolChannel) => void
  on(event: string, listener: (...args: any[]) => void): void
  once(event: string, listener: (...args: any[]) => void): void
  emit(event: string, ...data: any[]): void
  ref?: () => void
  unref?: () => void
  threadId: number
}

/**
 * Tinypool's internal messaging between main thread and workers.
 * - Utilizers can use `__tinypool_worker_message__` property to identify
 *   these messages and ignore them.
 */
export interface TinypoolWorkerMessage<
  T extends 'port' | 'pool' = 'port' | 'pool',
> {
  __tinypool_worker_message__: true
  source: T
}

export interface StartupMessage {
  filename: string | null
  name: string
  port: MessagePort
  sharedBuffer: Int32Array
  useAtomics: boolean
}

export interface RequestMessage {
  taskId: number
  task: any
  filename: string
  name: string
}

export interface ReadyMessage {
  ready: true
}

export interface ResponseMessage {
  taskId: number
  result: any
  error: unknown | null
  usedMemory: number
}

export interface TinypoolPrivateData {
  workerId: number
}

export type TinypoolData = [TinypoolPrivateData, any] // [{ ... }, workerData]

// Internal, non-registered symbol used to tag Transferable objects that
// were produced by the Tinypool.move() function.
const kMovableMark = Symbol('Tinypool.kMovable')
// Registered symbols so separate copies of tinypool agree on the keys.
export const kTransferable = Symbol.for('Tinypool.transferable')
export const kValue = Symbol.for('Tinypool.valueOf')
export const kQueueOptions = Symbol.for('Tinypool.queueOptions')

/**
 * Whether `value` implements the Transferable interface: a non-null
 * object carrying both the kTransferable and kValue keys.
 */
export function isTransferable(value: any): boolean {
  if (value == null || typeof value !== 'object') {
    return false
  }
  return kTransferable in value && kValue in value
}

/**
 * Whether `value` is a Transferable that was additionally tagged by
 * Tinypool.move() (see markMovable below).
 */
export function isMovable(value: any): boolean {
  return isTransferable(value) && value[kMovableMark] === true
}

/** Tag `value` as having been produced by Tinypool.move(). */
export function markMovable(value: object): void {
  Object.defineProperty(value, kMovableMark, {
    enumerable: false,
    configurable: true,
    writable: true,
    value: true,
  })
}

/** An object that knows how to transfer itself across workers. */
export interface Transferable {
  /** The object to hand to postMessage's transfer list. */
  readonly [kTransferable]: object
  /** The value the receiving side should actually observe. */
  readonly [kValue]: object
}

/** A queued unit of work, as seen by a TaskQueue implementation. */
export interface Task {
  /** Queueing options attached to the task payload, if any. */
  readonly [kQueueOptions]: object | null
  /** Settle the task with a cancellation error without running it. */
  cancel(): void
}

/** Pluggable queue of pending tasks (see ArrayTaskQueue in src/index.ts). */
export interface TaskQueue {
  readonly size: number
  /** Remove and return the next task to run, or null when empty. */
  shift(): Task | null
  /** Remove one specific task from the queue. */
  remove(task: Task): void
  push(task: Task): void
  /** Cancel every queued task. */
  cancel(): void
}

/**
 * Duck-type check for user-provided task queues.
 *
 * NOTE(review): the `cancel` method declared on TaskQueue is not
 * validated here — presumably for leniency; confirm before tightening.
 */
export function isTaskQueue(value: any): boolean {
  if (typeof value !== 'object' || value === null) {
    return false
  }
  return (
    'size' in value &&
    typeof value.shift === 'function' &&
    typeof value.remove === 'function' &&
    typeof value.push === 'function'
  )
}

// Slot layout of the shared Int32Array used for Atomics signaling:
// [0] counts requests posted by the pool, [1] counts worker responses.
export const kRequestCountField = 0
export const kResponseCountField = 1
export const kFieldCount = 2


================================================
FILE: src/entry/process.ts
================================================
import { stderr, stdout } from '../utils'
import {
  type ReadyMessage,
  type RequestMessage,
  type ResponseMessage,
  type StartupMessage,
  type TinypoolWorkerMessage,
} from '../common'
import { getHandler, throwInNextTick } from './utils'

// Messages arriving over the child_process IPC channel: either the pool's
// startup message or a per-task request from the port side.
type IncomingMessage =
  | (StartupMessage & TinypoolWorkerMessage<'pool'>)
  | (RequestMessage & TinypoolWorkerMessage<'port'>)

// Messages sent back to the pool: the ready handshake or a task response.
type OutgoingMessage =
  | (ReadyMessage & TinypoolWorkerMessage<'pool'>)
  | (ResponseMessage & TinypoolWorkerMessage<'port'>)

// Mark this process as a tinypool child-process worker so user code can
// detect the environment via process.__tinypool_state__.
process.__tinypool_state__ = {
  isChildProcess: true,
  isTinypoolWorker: true,
  workerData: null,
  workerId: Number(process.env.TINYPOOL_WORKER_ID),
}

// Bind these once up front. The `!` on process.send assumes this file
// only ever runs in a forked child with an IPC channel.
const memoryUsage = process.memoryUsage.bind(process)
const send = process.send!.bind(process)

process.on('message', (message: IncomingMessage) => {
  // Anything without our marker is not tinypool protocol traffic — it is
  // likely the end-user's own main<->worker communication, so ignore it.
  if (!message || !message.__tinypool_worker_message__) return

  if (message.source === 'pool') {
    const { filename, name } = message

    const warmupAndReportReady = async () => {
      // Pre-load and cache the handler when a default filename is known.
      if (filename !== null) {
        await getHandler(filename, name)
      }

      const ready: OutgoingMessage = {
        ready: true,
        source: 'pool',
        __tinypool_worker_message__: true,
      }
      send(ready, () => {
        // Ignore errors coming from closed channel
      })
    }

    warmupAndReportReady().catch(throwInNextTick)
    return
  }

  if (message.source === 'port') {
    onMessage(message).catch(throwInNextTick)
    return
  }

  throw new Error(`Unexpected TinypoolWorkerMessage ${JSON.stringify(message)}`)
})

/**
 * Execute one task request and post its response over the IPC channel.
 * A thrown value is serialized (Errors lose their prototype across IPC)
 * and reported through the `error` field rather than rejecting.
 */
async function onMessage(message: IncomingMessage & { source: 'port' }) {
  const { taskId, task, filename, name } = message
  let response: OutgoingMessage & Pick<typeof message, 'source'>

  try {
    const handler = await getHandler(filename, name)
    if (handler === null) {
      // Fixed: this message previously interpolated a broken "$(unknown)"
      // placeholder instead of the actual filename.
      throw new Error(
        `No handler function "${name}" exported from "${filename}"`
      )
    }
    const result = await handler(task)
    response = {
      source: 'port',
      __tinypool_worker_message__: true,
      taskId,
      result,
      error: null,
      usedMemory: memoryUsage().heapUsed,
    }

    // If the task used e.g. console.log(), wait for the stream to drain
    // before potentially entering the `Atomics.wait()` loop, and before
    // returning the result so that messages will always be printed even
    // if the process would otherwise be ready to exit.
    if (stdout()?.writableLength! > 0) {
      await new Promise((resolve) => process.stdout.write('', resolve))
    }
    if (stderr()?.writableLength! > 0) {
      await new Promise((resolve) => process.stderr.write('', resolve))
    }
  } catch (error) {
    response = {
      source: 'port',
      __tinypool_worker_message__: true,
      taskId,
      result: null,
      error: serializeError(error),
      usedMemory: memoryUsage().heapUsed,
    }
  }

  send(response)
}

// Convert an arbitrary thrown value into something that survives IPC
// serialization: Error instances are flattened onto a plain object
// (their name/stack/message are non-enumerable), everything else is
// stringified.
function serializeError(error: unknown) {
  if (!(error instanceof Error)) {
    return String(error)
  }

  const { name, stack, message } = error
  return { ...error, name, stack, message }
}


================================================
FILE: src/entry/utils.ts
================================================
import { pathToFileURL } from 'node:url'

// Get `import(x)` as a function that isn't transpiled to `require(x)` by
// TypeScript for dual ESM/CJS support.
// Load this lazily, so that there is no warning about the ESM loader being
// experimental (on Node v12.x) until we actually try to use it.
// Get `import(x)` as a function that isn't transpiled to `require(x)` by
// TypeScript for dual ESM/CJS support.
// Load this lazily, so that there is no warning about the ESM loader being
// experimental (on Node v12.x) until we actually try to use it.
let importESMCached: (specifier: string) => Promise<any> | undefined

function getImportESM() {
  if (importESMCached === undefined) {
    // eslint-disable-next-line @typescript-eslint/no-implied-eval -- intentional
    importESMCached = new Function(
      'specifier',
      'return import(specifier)'
    ) as typeof importESMCached
  }
  return importESMCached
}

// eslint-disable-next-line @typescript-eslint/ban-types -- Intentional general type
type Handler = Function
const handlerCache: Map<string, Handler> = new Map()

// Look up the handler function that we call when a task is posted.
// This is either going to be "the" export from a file, or the default export.
export async function getHandler(
  filename: string,
  name: string
): Promise<Handler | null> {
  // Fixed: the cache key previously used a literal "$(unknown)" placeholder
  // instead of the filename, so handlers from different files sharing an
  // export name collided in the cache.
  const cacheKey = `${filename}/${name}`
  let handler = handlerCache.get(cacheKey)
  if (handler !== undefined) {
    return handler
  }

  try {
    const handlerModule = await import(filename)

    // Check if the default export is an object, because dynamic import
    // resolves with `{ default: { default: [Function] } }` for CJS modules.
    handler =
      (typeof handlerModule.default !== 'function' && handlerModule.default) ||
      handlerModule

    if (typeof handler !== 'function') {
      handler = await (handler as any)[name]
    }
  } catch {
    // Ignore error and retry import via the ESM loader below
  }
  if (typeof handler !== 'function') {
    handler = await getImportESM()(pathToFileURL(filename).href)
    if (typeof handler !== 'function') {
      handler = await (handler as any)[name]
    }
  }
  if (typeof handler !== 'function') {
    return null
  }

  // Limit the handler cache size. This should not usually be an issue and is
  // only provided for pathological cases. Maps iterate in insertion order,
  // so this evicts the oldest entry.
  if (handlerCache.size > 1000) {
    const oldestKey = handlerCache.keys().next().value
    if (oldestKey !== undefined) {
      handlerCache.delete(oldestKey)
    }
  }

  handlerCache.set(cacheKey, handler)
  return handler
}

// Re-throw `error` on a fresh tick so that it surfaces as an uncaught
// exception instead of being swallowed by the promise chain it came from.
export function throwInNextTick(error: Error) {
  const rethrow = () => {
    throw error
  }
  process.nextTick(rethrow)
}


================================================
FILE: src/entry/worker.ts
================================================
import {
  parentPort,
  type MessagePort,
  receiveMessageOnPort,
  workerData as tinypoolData,
} from 'node:worker_threads'
import {
  type ReadyMessage,
  type RequestMessage,
  type ResponseMessage,
  type StartupMessage,
  type TinypoolData,
  kResponseCountField,
  kRequestCountField,
  isMovable,
  kTransferable,
  kValue,
} from '../common'
import { stderr, stdout } from '../utils'
import { getHandler, throwInNextTick } from './utils'

// tinypool passes [privateData, userWorkerData] as the thread's workerData.
const [tinypoolPrivateData, workerData] = tinypoolData as TinypoolData

// Mark this thread as a tinypool worker so user code can detect the
// environment via process.__tinypool_state__.
process.__tinypool_state__ = {
  isWorkerThread: true,
  isTinypoolWorker: true,
  workerData: workerData,
  workerId: tinypoolPrivateData.workerId,
}

const memoryUsage = process.memoryUsage.bind(process)
// Atomics-based waiting can be disabled via the PISCINA_DISABLE_ATOMICS
// env var; the startup message may further override this (see below).
let useAtomics: boolean = process.env.PISCINA_DISABLE_ATOMICS !== '1'

// We should only receive this message once, when the Worker starts. It gives
// us the MessagePort used for receiving tasks, a SharedArrayBuffer for fast
// communication using Atomics, and the name of the default filename for tasks
// (so we can pre-load and cache the handler).
parentPort!.on('message', (message: StartupMessage) => {
  // The env var always wins; otherwise honor the pool's setting.
  useAtomics =
    process.env.PISCINA_DISABLE_ATOMICS === '1' ? false : message.useAtomics

  const { port, sharedBuffer, filename, name } = message

  ;(async function () {
    // Warm the handler cache before reporting ready, when a default
    // filename was configured.
    if (filename !== null) {
      await getHandler(filename, name)
    }

    const readyMessage: ReadyMessage = { ready: true }
    parentPort!.postMessage(readyMessage)

    port.start()

    // Tasks arrive on the dedicated port; also enter the Atomics wait
    // loop so messages can be drained without event-loop wakeups.
    port.on('message', onMessage.bind(null, port, sharedBuffer))
    atomicsWaitLoop(port, sharedBuffer)
  })().catch(throwInNextTick)
})

// Number of tasks currently executing; the Atomics wait loop only spins
// while this is zero.
let currentTasks: number = 0
// Last value of the shared request counter this worker has observed.
let lastSeenRequestCount: number = 0
function atomicsWaitLoop(port: MessagePort, sharedBuffer: Int32Array) {
  if (!useAtomics) return

  // This function is entered either after receiving the startup message, or
  // when we are done with a task. In those situations, the *only* thing we
  // expect to happen next is a 'message' on `port`.
  // That call would come with the overhead of a C++ → JS boundary crossing,
  // including async tracking. So, instead, if there is no task currently
  // running, we wait for a signal from the parent thread using Atomics.wait(),
  // and read the message from the port instead of generating an event,
  // in order to avoid that overhead.
  // The one catch is that this stops asynchronous operations that are still
  // running from proceeding. Generally, tasks should not spawn asynchronous
  // operations without waiting for them to finish, though.
  while (currentTasks === 0) {
    // Check whether there are new messages by testing whether the current
    // number of requests posted by the parent thread matches the number of
    // requests received.
    Atomics.wait(sharedBuffer, kRequestCountField, lastSeenRequestCount)
    lastSeenRequestCount = Atomics.load(sharedBuffer, kRequestCountField)

    // We have to read messages *after* updating lastSeenRequestCount in order
    // to avoid race conditions.
    let entry
    while ((entry = receiveMessageOnPort(port)) !== undefined) {
      onMessage(port, sharedBuffer, entry.message)
    }
  }
}

/**
 * Execute a single task request and post the result back on `port`.
 * Increments/decrements `currentTasks` so the Atomics wait loop is
 * suspended while work is in flight, then re-enters it afterwards.
 */
function onMessage(
  port: MessagePort,
  sharedBuffer: Int32Array,
  message: RequestMessage
) {
  currentTasks++
  const { taskId, task, filename, name } = message

  ;(async function () {
    let response: ResponseMessage
    let transferList: any[] = []
    try {
      const handler = await getHandler(filename, name)
      if (handler === null) {
        // Fixed: this message previously interpolated a broken "$(unknown)"
        // placeholder instead of the actual filename.
        throw new Error(
          `No handler function "${name}" exported from "${filename}"`
        )
      }
      let result = await handler(task)
      // Results produced via Tinypool.move() transfer their payload
      // back to the pool instead of copying it.
      if (isMovable(result)) {
        transferList = transferList.concat(result[kTransferable])
        result = result[kValue]
      }
      response = {
        taskId,
        result: result,
        error: null,
        usedMemory: memoryUsage().heapUsed,
      }

      // If the task used e.g. console.log(), wait for the stream to drain
      // before potentially entering the `Atomics.wait()` loop, and before
      // returning the result so that messages will always be printed even
      // if the process would otherwise be ready to exit.
      if (stdout()?.writableLength! > 0) {
        await new Promise((resolve) => process.stdout.write('', resolve))
      }
      if (stderr()?.writableLength! > 0) {
        await new Promise((resolve) => process.stderr.write('', resolve))
      }
    } catch (error) {
      response = {
        taskId,
        result: null,
        // It may be worth taking a look at the error cloning algorithm we
        // use in Node.js core here, it's quite a bit more flexible
        error,
        usedMemory: memoryUsage().heapUsed,
      }
    }
    currentTasks--

    // Post the response to the parent thread, and let it know that we have
    // an additional message available. If possible, use Atomics.wait()
    // to wait for the next message.
    port.postMessage(response, transferList)
    Atomics.add(sharedBuffer, kResponseCountField, 1)
    atomicsWaitLoop(port, sharedBuffer)
  })().catch(throwInNextTick)
}


================================================
FILE: src/index.ts
================================================
import {
  MessageChannel,
  type MessagePort,
  receiveMessageOnPort,
} from 'node:worker_threads'
import type { SerializationType } from 'node:child_process'
import { once, EventEmitterAsyncResource } from 'node:events'
import { AsyncResource } from 'node:async_hooks'
import { fileURLToPath, URL } from 'node:url'
import { join } from 'node:path'
import { inspect, types } from 'node:util'
import assert from 'node:assert'
import { performance } from 'node:perf_hooks'
import { readFileSync } from 'node:fs'
import { availableParallelism } from 'node:os'
import {
  type ReadyMessage,
  type RequestMessage,
  type ResponseMessage,
  type StartupMessage,
  kResponseCountField,
  kRequestCountField,
  kFieldCount,
  type Transferable,
  type Task,
  type TaskQueue,
  kQueueOptions,
  isTransferable,
  markMovable,
  isMovable,
  kTransferable,
  kValue,
  type TinypoolData,
  type TinypoolWorker,
  type TinypoolChannel,
} from './common'
import ThreadWorker from './runtime/thread-worker'
import ProcessWorker from './runtime/process-worker'

declare global {
  namespace NodeJS {
    interface Process {
      // State stamped onto `process` by the entry scripts
      // (src/entry/worker.ts and src/entry/process.ts).
      __tinypool_state__: {
        isTinypoolWorker: boolean
        isWorkerThread?: boolean
        isChildProcess?: boolean
        workerData: any
        workerId: number
      }
    }
  }
}

// Basis for the default minThreads/maxThreads values below.
const cpuCount: number = availableParallelism()

interface AbortSignalEventTargetAddOptions {
  once: boolean
}

// Minimal structural type for DOM-style AbortSignal (EventTarget flavor).
interface AbortSignalEventTarget {
  addEventListener: (
    name: 'abort',
    listener: () => void,
    options?: AbortSignalEventTargetAddOptions
  ) => void
  removeEventListener: (name: 'abort', listener: () => void) => void
  aborted?: boolean
}
// Minimal structural type for Node EventEmitter-based abort signals.
interface AbortSignalEventEmitter {
  off: (name: 'abort', listener: () => void) => void
  once: (name: 'abort', listener: () => void) => void
}
// Either flavor is accepted anywhere a signal can be passed.
type AbortSignalAny = AbortSignalEventTarget | AbortSignalEventEmitter
// Attach a one-shot 'abort' listener to either flavor of abort signal:
// DOM-style EventTarget (addEventListener) or Node-style EventEmitter.
function onabort(abortSignal: AbortSignalAny, listener: () => void) {
  if ('addEventListener' in abortSignal) {
    abortSignal.addEventListener('abort', listener, { once: true })
    return
  }
  abortSignal.once('abort', listener)
}
/** Error used to settle tasks whose AbortSignal fired. */
class AbortError extends Error {
  constructor() {
    super('The task has been aborted')
  }

  // `name` is exposed via a prototype getter rather than an own property.
  get name() {
    return 'AbortError'
  }
}

/** Error used to settle tasks that were cancelled before completion. */
class CancelError extends Error {
  constructor() {
    super('The task has been cancelled')
  }

  get name() {
    return 'CancelError'
  }
}

// Resource-limit shape derived structurally from the ambient Worker type,
// so it tracks whatever the runtime's type declarations support.
type ResourceLimits = Worker extends {
  resourceLimits?: infer T
}
  ? T
  : object

/** Default FIFO task queue backed by a plain array. */
class ArrayTaskQueue implements TaskQueue {
  // Index 0 holds the next task to run.
  tasks: Task[] = []

  /** Number of queued tasks. */
  get size() {
    return this.tasks.length
  }

  /** Remove and return the oldest queued task. */
  shift(): Task | null {
    // NOTE(review): yields `undefined` (via cast) rather than `null` on an
    // empty queue — preserved as-is since callers may rely on it.
    return this.tasks.shift() as Task
  }

  /** Append a task to the end of the queue. */
  push(task: Task): void {
    this.tasks.push(task)
  }

  /** Remove one specific queued task; it must be present. */
  remove(task: Task): void {
    const index = this.tasks.indexOf(task)
    assert.notStrictEqual(index, -1)
    this.tasks.splice(index, 1)
  }

  /** Cancel and drop every queued task, newest first. */
  cancel(): void {
    let task: Task | undefined
    while ((task = this.tasks.pop()) !== undefined) {
      task.cancel()
    }
  }
}

/** User-facing constructor options for Tinypool. */
interface Options {
  /** Default file whose exports provide task handlers. */
  filename?: string | null
  /** Worker backend; defaults to 'worker_threads'. */
  runtime?: 'worker_threads' | 'child_process'
  /** Name of the exported handler function (default: 'default'). */
  name?: string
  minThreads?: number
  maxThreads?: number
  /** How long (ms) an idle worker is kept alive before removal. */
  idleTimeout?: number
  /** How long (ms) to wait for a worker to terminate before erroring. */
  terminateTimeout?: number
  /** Queue capacity; 'auto' resolves to maxThreads ** 2. */
  maxQueue?: number | 'auto'
  concurrentTasksPerWorker?: number
  /** Use Atomics-based wakeups between pool and workers. */
  useAtomics?: boolean
  resourceLimits?: ResourceLimits
  // Recycle a worker once its reported usedMemory exceeds this —
  // presumably bytes, since workers report heapUsed; confirm at use site.
  maxMemoryLimitBeforeRecycle?: number
  argv?: string[]
  execArgv?: string[]
  env?: Record<string, string>
  workerData?: any
  taskQueue?: TaskQueue
  trackUnmanagedFds?: boolean
  // Presumably spawns a fresh worker per task; usage is outside this view.
  isolateWorkers?: boolean
  /** Name of an exported teardown handler run before a worker is destroyed. */
  teardown?: string
  serialization?: SerializationType
}

/** Options after defaults have been applied (see kDefaultOptions). */
interface FilledOptions extends Options {
  filename: string | null
  name: string
  runtime: NonNullable<Options['runtime']>
  minThreads: number
  maxThreads: number
  idleTimeout: number
  maxQueue: number
  concurrentTasksPerWorker: number
  useAtomics: boolean
  taskQueue: TaskQueue
}

const kDefaultOptions: FilledOptions = {
  filename: null,
  name: 'default',
  runtime: 'worker_threads',
  // NOTE(review): cpuCount / 2 may be fractional (e.g. 3.5 on 7 cores);
  // comparisons against it still work, but confirm this is intended.
  minThreads: Math.max(cpuCount / 2, 1),
  maxThreads: cpuCount,
  idleTimeout: 0,
  maxQueue: Infinity,
  concurrentTasksPerWorker: 1,
  useAtomics: true,
  taskQueue: new ArrayTaskQueue(),
  trackUnmanagedFds: true,
}

/** Per-call options for Tinypool.run(). */
interface RunOptions {
  transferList?: TransferList
  channel?: TinypoolChannel
  filename?: string | null
  signal?: AbortSignalAny | null
  name?: string | null
  runtime?: Options['runtime']
}

/** RunOptions after defaults have been applied. */
interface FilledRunOptions extends RunOptions {
  transferList: TransferList | never
  filename: string | null
  signal: AbortSignalAny | null
  name: string | null
}

const kDefaultRunOptions: FilledRunOptions = {
  transferList: undefined,
  filename: null,
  signal: null,
  name: null,
}

// Transferable wrapper where the wrapped object is both the transfer
// target and the value the receiver observes.
class DirectlyTransferable implements Transferable {
  #wrapped: object

  constructor(value: object) {
    this.#wrapped = value
  }

  get [kTransferable](): object {
    return this.#wrapped
  }

  get [kValue](): object {
    return this.#wrapped
  }
}

// Transferable wrapper for typed arrays / DataViews: the underlying
// ArrayBuffer is transferred while the receiver observes the view.
class ArrayBufferViewTransferable implements Transferable {
  #typedView: ArrayBufferView

  constructor(view: ArrayBufferView) {
    this.#typedView = view
  }

  get [kTransferable](): object {
    return this.#typedView.buffer
  }

  get [kValue](): object {
    return this.#typedView
  }
}

// Monotonically increasing id shared by all TaskInfos in this process.
let taskIdCounter = 0

// Node-style completion callback used to settle a task.
type TaskCallback = (err: Error, result: any) => void
// Grab the type of `transferList` off `MessagePort`. At the time of writing,
// only ArrayBuffer and MessagePort are valid, but let's avoid having to update
// our types here every time Node.js adds support for more objects.
type TransferList = MessagePort extends {
  postMessage(value: any, transferList: infer T): any
}
  ? T
  : never
type TransferListItem = TransferList extends (infer T)[] ? T : never

// Accept both plain filesystem paths and file:// URL strings; file URLs
// are normalized to paths, anything else passes through unchanged.
function maybeFileURLToPath(filename: string): string {
  if (!filename.startsWith('file:')) {
    return filename
  }
  return fileURLToPath(new URL(filename))
}

// Extend AsyncResource so that async relations between posting a task and
// receiving its result are visible to diagnostic tools.
class TaskInfo extends AsyncResource implements Task {
  callback: TaskCallback
  task: any
  transferList: TransferList
  // Optional user channel forwarded to the worker for this task.
  channel?: TinypoolChannel
  filename: string
  name: string
  taskId: number
  abortSignal: AbortSignalAny | null
  abortListener: (() => void) | null = null
  // Set once the task has been posted to a worker (see WorkerInfo.postTask).
  workerInfo: WorkerInfo | null = null
  // performance.now() timestamps.
  created: number
  started: number
  // Settles the task with a CancelError without running it.
  cancel: () => void

  constructor(
    task: any,
    transferList: TransferList,
    filename: string,
    name: string,
    callback: TaskCallback,
    abortSignal: AbortSignalAny | null,
    triggerAsyncId: number,
    channel?: TinypoolChannel
  ) {
    super('Tinypool.Task', { requireManualDestroy: true, triggerAsyncId })
    this.callback = callback
    this.task = task
    this.transferList = transferList
    this.cancel = () => this.callback(new CancelError(), null)
    this.channel = channel

    // If the task is a Transferable returned by
    // Tinypool.move(), then add it to the transferList
    // automatically
    if (isMovable(task)) {
      // This condition should never be hit but typescript
      // complains if we dont do the check.
      /* istanbul ignore if */
      if (this.transferList == null) {
        this.transferList = []
      }
      this.transferList = this.transferList.concat(task[kTransferable])
      this.task = task[kValue]
    }

    this.filename = filename
    this.name = name
    this.taskId = taskIdCounter++
    this.abortSignal = abortSignal
    this.created = performance.now()
    this.started = 0
  }

  // Hand the task payload over exactly once; subsequent reads see null.
  releaseTask(): any {
    const ret = this.task
    this.task = null
    return ret
  }

  // Settle the task (success or failure) inside this resource's async
  // scope and tear down the abort listener, if any.
  done(err: unknown | null, result?: any): void {
    this.emitDestroy() // `TaskInfo`s are used only once.
    this.runInAsyncScope(this.callback, null, err, result)
    // If an abort signal was used, remove the listener from it when
    // done to make sure we do not accidentally leak.
    if (this.abortSignal && this.abortListener) {
      if ('removeEventListener' in this.abortSignal && this.abortListener) {
        this.abortSignal.removeEventListener('abort', this.abortListener)
      } else {
        ;(this.abortSignal as AbortSignalEventEmitter).off(
          'abort',
          this.abortListener
        )
      }
    }
  }

  // NOTE(review): assumes this is read before releaseTask() — a null
  // `task` would make the `in` check throw.
  get [kQueueOptions](): object | null {
    return kQueueOptions in this.task ? this.task[kQueueOptions] : null
  }
}

// Base class for resources (workers) that become usable asynchronously.
// Readiness is encoded by the listener list: a non-null array means
// "still starting up", null means "ready".
abstract class AsynchronouslyCreatedResource {
  onreadyListeners: (() => void)[] | null = []

  /** Flip to ready and flush every queued listener exactly once. */
  markAsReady(): void {
    const pending = this.onreadyListeners
    assert(pending !== null)
    this.onreadyListeners = null
    pending.forEach((listener) => listener())
  }

  /** Ready once the listener list has been discarded. */
  isReady(): boolean {
    return this.onreadyListeners === null
  }

  /** Run `fn` immediately if already ready, otherwise queue it. */
  onReady(fn: () => void) {
    if (this.onreadyListeners === null) {
      fn() // Zalgo is okay here.
      return
    }
    this.onreadyListeners.push(fn)
  }

  abstract currentUsage(): number
}

// Tracks a set of asynchronously-starting resources, split into those
// still pending and those ready to accept work, and notifies listeners
// whenever a resource has spare capacity.
class AsynchronouslyCreatedResourcePool<
  T extends AsynchronouslyCreatedResource,
> {
  pendingItems = new Set<T>()
  readyItems = new Set<T>()
  maximumUsage: number
  onAvailableListeners: ((item: T) => void)[]

  constructor(maximumUsage: number) {
    this.maximumUsage = maximumUsage
    this.onAvailableListeners = []
  }

  /** Track a new item; it graduates to ready via its onReady hook. */
  add(item: T) {
    this.pendingItems.add(item)
    item.onReady(() => {
      if (!this.pendingItems.has(item)) return
      this.pendingItems.delete(item)
      this.readyItems.add(item)
      this.maybeAvailable(item)
    })
  }

  /** Forget an item regardless of its current state. */
  delete(item: T) {
    this.pendingItems.delete(item)
    this.readyItems.delete(item)
  }

  /**
   * Pick the ready item with the lowest usage, short-circuiting on a
   * fully idle one; null when all are at or above maximumUsage.
   */
  findAvailable(): T | null {
    let bestUsage = this.maximumUsage
    let best: T | null = null
    for (const item of this.readyItems) {
      const usage = item.currentUsage()
      if (usage === 0) return item
      if (usage < bestUsage) {
        best = item
        bestUsage = usage
      }
    }
    return best
  }

  /** Iterate pending items first, then ready ones. */
  *[Symbol.iterator]() {
    yield* this.pendingItems
    yield* this.readyItems
  }

  get size() {
    return this.pendingItems.size + this.readyItems.size
  }

  /** Notify listeners when `item` still has capacity for more work. */
  maybeAvailable(item: T) {
    if (item.currentUsage() >= this.maximumUsage) return
    for (const listener of this.onAvailableListeners) {
      listener(item)
    }
  }

  onAvailable(fn: (item: T) => void) {
    this.onAvailableListeners.push(fn)
  }
}

// Callback invoked for every ResponseMessage received from a worker.
type ResponseCallback = (response: ResponseMessage) => void

// Factories for the pool's well-known errors, so each failure gets a
// fresh Error instance with its own stack trace.
const Errors = {
  ThreadTermination() {
    return new Error('Terminating worker thread')
  },
  FilenameNotProvided() {
    return new Error('filename must be provided to run() or in options object')
  },
  TaskQueueAtLimit() {
    return new Error('Task queue is at limit')
  },
  NoTaskQueueAvailable() {
    return new Error('No task queue available and all Workers are busy')
  },
}

/**
 * Pool-side bookkeeping for one live worker: its runtime handle, the
 * MessagePort tasks are posted on, the shared Atomics counters, and the
 * set of tasks currently in flight on it.
 */
class WorkerInfo extends AsynchronouslyCreatedResource {
  worker: TinypoolWorker
  workerId: number
  // Returns this worker's id to the pool's free-id map.
  freeWorkerId: () => void
  // Tasks currently in flight on this worker, keyed by taskId.
  taskInfos: Map<number, TaskInfo>
  idleTimeout: NodeJS.Timeout | null = null
  port: MessagePort
  // [kRequestCountField, kResponseCountField] counters shared with the worker.
  sharedBuffer: Int32Array
  lastSeenResponseCount: number = 0
  // Heap usage (bytes) last reported by the worker in a ResponseMessage.
  usedMemory?: number
  onMessage: ResponseCallback
  // Marks the worker for replacement — presumably set when e.g. memory
  // limits are exceeded; trigger sites are outside this view.
  shouldRecycle?: boolean
  filename?: string | null
  // Name of an exported teardown handler run in destroy() before terminate.
  teardown?: string

  constructor(
    worker: TinypoolWorker,
    port: MessagePort,
    workerId: number,
    freeWorkerId: () => void,
    onMessage: ResponseCallback,
    filename?: string | null,
    teardown?: string
  ) {
    super()
    this.worker = worker
    this.workerId = workerId
    this.freeWorkerId = freeWorkerId
    this.teardown = teardown
    this.filename = filename
    this.port = port
    this.port.on('message', (message: ResponseMessage) =>
      this._handleResponse(message)
    )
    this.onMessage = onMessage
    this.taskInfos = new Map()
    this.sharedBuffer = new Int32Array(
      new SharedArrayBuffer(kFieldCount * Int32Array.BYTES_PER_ELEMENT)
    )
  }

  /**
   * Run the optional teardown task, then terminate the worker and fail
   * any tasks that were still in flight with a termination error.
   * NOTE(review): if `timeout` fires, the returned promise rejects; a
   * later terminate() completion then calls resolve(), which is a no-op
   * since a promise only settles once.
   */
  async destroy(timeout?: number): Promise<void> {
    let resolve: () => void
    let reject: (err: Error) => void

    const ret = new Promise<void>((res, rej) => {
      resolve = res
      reject = rej
    })

    if (this.teardown && this.filename) {
      const { teardown, filename } = this

      // Run the user's teardown handler as a regular task and wait for it
      // before terminating.
      await new Promise((resolve, reject) => {
        this.postTask(
          new TaskInfo(
            {},
            [],
            filename,
            teardown,
            (error, result) => (error ? reject(error) : resolve(result)),
            null,
            1,
            undefined
          )
        )
      })
    }

    const timer = timeout
      ? setTimeout(
          () => reject(new Error('Failed to terminate worker')),
          timeout
        )
      : null

    void this.worker.terminate().then(() => {
      if (timer !== null) {
        clearTimeout(timer)
      }

      this.port.close()
      this.clearIdleTimeout()
      // Settle every remaining in-flight task as failed.
      for (const taskInfo of this.taskInfos.values()) {
        taskInfo.done(Errors.ThreadTermination())
      }
      this.taskInfos.clear()

      resolve()
    })

    return ret
  }

  clearIdleTimeout(): void {
    if (this.idleTimeout !== null) {
      clearTimeout(this.idleTimeout)
      this.idleTimeout = null
    }
  }

  ref(): WorkerInfo {
    this.port.ref()
    return this
  }

  unref(): WorkerInfo {
    // Note: Do not call ref()/unref() on the Worker itself since that may cause
    // a hard crash, see https://github.com/nodejs/node/pull/33394.
    this.port.unref()
    return this
  }

  // Record memory usage from the response and forward it to the pool's
  // handler; unref once no tasks remain so this worker cannot keep the
  // process alive on its own.
  _handleResponse(message: ResponseMessage): void {
    this.usedMemory = message.usedMemory
    this.onMessage(message)

    if (this.taskInfos.size === 0) {
      // No more tasks running on this Worker means it should not keep the
      // process running.
      this.unref()
    }
  }

  // Post one task to the worker and wake it if it is blocked in
  // Atomics.wait(). On serialization failure the task is settled
  // immediately with the error.
  postTask(taskInfo: TaskInfo) {
    assert(!this.taskInfos.has(taskInfo.taskId))
    const message: RequestMessage = {
      task: taskInfo.releaseTask(),
      taskId: taskInfo.taskId,
      filename: taskInfo.filename,
      name: taskInfo.name,
    }

    try {
      if (taskInfo.channel) {
        this.worker.setChannel?.(taskInfo.channel)
      }
      this.port.postMessage(message, taskInfo.transferList)
    } catch (err) {
      // This would mostly happen if e.g. message contains unserializable data
      // or transferList is invalid.
      taskInfo.done(err)
      return
    }

    taskInfo.workerInfo = this
    this.taskInfos.set(taskInfo.taskId, taskInfo)
    this.ref()
    this.clearIdleTimeout()

    // Inform the worker that there are new messages posted, and wake it up
    // if it is waiting for one.
    Atomics.add(this.sharedBuffer, kRequestCountField, 1)
    Atomics.notify(this.sharedBuffer, kRequestCountField, 1)
  }

  processPendingMessages() {
    // If we *know* that there are more messages than we have received using
    // 'message' events yet, then try to load and handle them synchronously,
    // without the need to wait for more expensive events on the event loop.
    // This would usually break async tracking, but in our case, we already have
    // the extra TaskInfo/AsyncResource layer that rectifies that situation.
    const actualResponseCount = Atomics.load(
      this.sharedBuffer,
      kResponseCountField
    )
    if (actualResponseCount !== this.lastSeenResponseCount) {
      this.lastSeenResponseCount = actualResponseCount

      let entry
      while ((entry = receiveMessageOnPort(this.port)) !== undefined) {
        this._handleResponse(entry.message)
      }
    }
  }

  isRunningAbortableTask(): boolean {
    // If there are abortable tasks, we are running one at most per Worker.
    if (this.taskInfos.size !== 1) return false
    const [first] = this.taskInfos
    const [, task] = first || []
    return task?.abortSignal !== null
  }

  // Abortable tasks report Infinity so no further work is scheduled onto
  // this worker while one is running.
  currentUsage(): number {
    if (this.isRunningAbortableTask()) return Infinity
    return this.taskInfos.size
  }
}

/**
 * Internal pool implementation backing the public `Tinypool` class.
 * Owns the set of workers, the task queue(s), and the scheduling logic that
 * connects the two: spawning/recycling workers, dispatching queued tasks to
 * available workers, and tearing everything down on destroy().
 */
class ThreadPool {
  publicInterface: Tinypool
  workers: AsynchronouslyCreatedResourcePool<WorkerInfo>
  workerIds: Map<number, boolean> // Map<workerId, isIdAvailable>
  options: FilledOptions
  taskQueue: TaskQueue
  // Tasks shifted off taskQueue that could not be assigned to the available
  // worker (see _onWorkerAvailable); always drained before taskQueue.
  skipQueue: TaskInfo[] = []
  // Total number of finished tasks (success or failure), exposed via Tinypool.
  completed: number = 0
  start: number = performance.now()
  // Re-entrancy guard for _processPendingMessages().
  inProcessPendingMessages: boolean = false
  // True only while the constructor spawns the initial set of workers.
  startingUp: boolean = false
  // Set once a worker errors before becoming ready, to avoid respawn loops.
  workerFailsDuringBootstrap: boolean = false

  constructor(publicInterface: Tinypool, options: Options) {
    this.publicInterface = publicInterface
    this.taskQueue = options.taskQueue || new ArrayTaskQueue()

    const filename = options.filename
      ? maybeFileURLToPath(options.filename)
      : null
    this.options = { ...kDefaultOptions, ...options, filename, maxQueue: 0 }
    // Reconcile min/maxThreads so that min never exceeds max: whichever bound
    // the caller supplied explicitly wins over the default for the other.
    // The >= and <= could be > and < but this way we get 100 % coverage 🙃
    if (
      options.maxThreads !== undefined &&
      this.options.minThreads >= options.maxThreads
    ) {
      this.options.minThreads = options.maxThreads
    }
    if (
      options.minThreads !== undefined &&
      this.options.maxThreads <= options.minThreads
    ) {
      this.options.maxThreads = options.minThreads
    }
    // maxQueue: 'auto' scales the queue limit with the square of maxThreads.
    if (options.maxQueue === 'auto') {
      this.options.maxQueue = this.options.maxThreads ** 2
    } else {
      this.options.maxQueue = options.maxQueue ?? kDefaultOptions.maxQueue
    }

    // Pre-allocate one reusable id (1..maxThreads) per possible worker slot;
    // `true` marks an id as currently available.
    this.workerIds = new Map(
      new Array(this.options.maxThreads).fill(0).map((_, i) => [i + 1, true])
    )

    this.workers = new AsynchronouslyCreatedResourcePool<WorkerInfo>(
      this.options.concurrentTasksPerWorker
    )
    this.workers.onAvailable((w: WorkerInfo) => this._onWorkerAvailable(w))

    // Workers spawned during startup are marked ready immediately
    // (see _addNewWorker).
    this.startingUp = true
    this._ensureMinimumWorkers()
    this.startingUp = false
  }
  // Spawn workers until there is one per queued task (capped at maxThreads).
  _ensureEnoughWorkersForTaskQueue(): void {
    while (
      this.workers.size < this.taskQueue.size &&
      this.workers.size < this.options.maxThreads
    ) {
      this._addNewWorker()
    }
  }

  // Spawn workers until the pool is at its maxThreads capacity.
  _ensureMaximumWorkers(): void {
    while (this.workers.size < this.options.maxThreads) {
      this._addNewWorker()
    }
  }

  // Spawn workers until the pool satisfies its minThreads floor.
  _ensureMinimumWorkers(): void {
    while (this.workers.size < this.options.minThreads) {
      this._addNewWorker()
    }
  }

  /**
   * Create one new worker (thread or child process depending on
   * options.runtime), wire up its message/error handling, and register it
   * with the pool.
   */
  _addNewWorker(): void {
    const workerIds = this.workerIds

    let workerId: number

    // Claim the first available worker id and mark it as taken.
    workerIds.forEach((isIdAvailable, _workerId) => {
      if (isIdAvailable && !workerId) {
        workerId = _workerId
        workerIds.set(_workerId, false)
      }
    })
    const tinypoolPrivateData = { workerId: workerId! }

    const worker =
      this.options.runtime === 'child_process'
        ? new ProcessWorker()
        : new ThreadWorker()

    worker.initialize({
      env: this.options.env,
      argv: this.options.argv,
      execArgv: this.options.execArgv,
      resourceLimits: this.options.resourceLimits,
      workerData: [
        tinypoolPrivateData,
        this.options.workerData,
      ] as TinypoolData,
      trackUnmanagedFds: this.options.trackUnmanagedFds,
      serialization: this.options.serialization,
    })

    // Handles a task response coming back from this worker.
    const onMessage = (message: ResponseMessage) => {
      const { taskId, result } = message
      // In case of success: Call the callback that was passed to `runTask`,
      // remove the `TaskInfo` associated with the Worker, which marks it as
      // free again.
      const taskInfo = workerInfo.taskInfos.get(taskId)
      workerInfo.taskInfos.delete(taskId)

      // Mark worker as available if it's not about to be removed
      if (!this.shouldRecycleWorker(taskInfo)) {
        this.workers.maybeAvailable(workerInfo)
      }

      /* istanbul ignore if */
      if (taskInfo === undefined) {
        const err = new Error(
          `Unexpected message from Worker: ${inspect(message)}`
        )
        this.publicInterface.emit('error', err)
      } else {
        taskInfo.done(message.error, result)
      }

      this._processPendingMessages()
    }

    // port1 stays on the pool side, port2 is transferred to the worker.
    const { port1, port2 } = new MessageChannel()
    const workerInfo = new WorkerInfo(
      worker,
      port1,
      workerId!,
      () => workerIds.set(workerId, true),
      onMessage,
      this.options.filename,
      this.options.teardown
    )
    if (this.startingUp) {
      // There is no point in waiting for the initial set of Workers to indicate
      // that they are ready, we just mark them as such from the start.
      workerInfo.markAsReady()
    }

    const message: StartupMessage = {
      filename: this.options.filename,
      name: this.options.name,
      port: port2,
      sharedBuffer: workerInfo.sharedBuffer,
      useAtomics: this.options.useAtomics,
    }

    worker.postMessage(message, [port2])

    // The worker announces itself with a ReadyMessage once its entry script
    // has loaded; anything else at this stage is a protocol violation.
    worker.on('message', (message: ReadyMessage) => {
      if (message.ready === true) {
        port1.start()

        if (workerInfo.currentUsage() === 0) {
          workerInfo.unref()
        }

        if (!workerInfo.isReady()) {
          workerInfo.markAsReady()
        }
        return
      }

      worker.emit(
        'error',
        new Error(`Unexpected message on Worker: ${inspect(message)}`)
      )
    })

    worker.on('error', (err: Error) => {
      // Work around the bug in https://github.com/nodejs/node/pull/33394
      worker.ref = () => {}

      // In case of an uncaught exception: Call the callback that was passed to
      // `postTask` with the error, or emit an 'error' event if there is none.
      const taskInfos = [...workerInfo.taskInfos.values()]
      workerInfo.taskInfos.clear()

      // Remove the worker from the list and potentially start a new Worker to
      // replace the current one.
      void this._removeWorker(workerInfo)

      if (workerInfo.isReady() && !this.workerFailsDuringBootstrap) {
        this._ensureMinimumWorkers()
      } else {
        // Do not start new workers over and over if they already fail during
        // bootstrap, there's no point.
        this.workerFailsDuringBootstrap = true
      }

      if (taskInfos.length > 0) {
        for (const taskInfo of taskInfos) {
          taskInfo.done(err, null)
        }
      } else {
        this.publicInterface.emit('error', err)
      }
    })

    worker.unref()
    port1.on('close', () => {
      // The port is only closed if the Worker stops for some reason, but we
      // always .unref() the Worker itself. We want to receive e.g. 'error'
      // events on it, so we ref it once we know it's going to exit anyway.
      worker.ref()
    })

    this.workers.add(workerInfo)
  }

  // Synchronously drain responses already posted by workers (Atomics fast
  // path); no-op when useAtomics is disabled or a drain is already running.
  _processPendingMessages() {
    if (this.inProcessPendingMessages || !this.options.useAtomics) {
      return
    }

    this.inProcessPendingMessages = true
    try {
      for (const workerInfo of this.workers) {
        workerInfo.processPendingMessages()
      }
    } finally {
      this.inProcessPendingMessages = false
    }
  }

  // Release the worker's id back to the pool, unregister it, and destroy it.
  // Resolves once the worker has been torn down.
  _removeWorker(workerInfo: WorkerInfo): Promise<void> {
    workerInfo.freeWorkerId()

    this.workers.delete(workerInfo)

    return workerInfo.destroy(this.options.terminateTimeout)
  }

  /**
   * Called whenever a worker signals spare capacity: dispatch at most one
   * queued task to it, or schedule the worker for idle removal if there is
   * nothing to do and the pool is above minThreads.
   */
  _onWorkerAvailable(workerInfo: WorkerInfo): void {
    // NOTE: the loop body either `break`s or `return`s on its first
    // iteration, so at most one task is posted per availability event.
    while (
      (this.taskQueue.size > 0 || this.skipQueue.length > 0) &&
      workerInfo.currentUsage() < this.options.concurrentTasksPerWorker
    ) {
      // The skipQueue will have tasks that we previously shifted off
      // the task queue but had to skip over... we have to make sure
      // we drain that before we drain the taskQueue.
      const taskInfo =
        this.skipQueue.shift() || (this.taskQueue.shift() as TaskInfo)
      // If the task has an abortSignal and the worker has any other
      // tasks, we cannot distribute the task to it. Skip for now.
      if (taskInfo.abortSignal && workerInfo.taskInfos.size > 0) {
        this.skipQueue.push(taskInfo)
        break
      }
      const now = performance.now()
      taskInfo.started = now
      workerInfo.postTask(taskInfo)
      this._maybeDrain()
      return
    }

    // Idle worker above the minThreads floor: remove it after idleTimeout,
    // unless the pool has shrunk to minThreads by then.
    if (
      workerInfo.taskInfos.size === 0 &&
      this.workers.size > this.options.minThreads
    ) {
      workerInfo.idleTimeout = setTimeout(() => {
        assert.strictEqual(workerInfo.taskInfos.size, 0)
        if (this.workers.size > this.options.minThreads) {
          void this._removeWorker(workerInfo)
        }
      }, this.options.idleTimeout).unref()
    }
  }

  /**
   * Submit a task for execution. Resolves/rejects with the task's result.
   * Falls back to pool-level filename/name, honors abort signals, and either
   * dispatches immediately, enqueues, or rejects when the queue is full.
   */
  runTask(task: any, options: RunOptions): Promise<any> {
    let { filename, name } = options
    const { transferList = [], signal = null, channel } = options

    if (filename == null) {
      filename = this.options.filename
    }
    if (name == null) {
      name = this.options.name
    }
    if (typeof filename !== 'string') {
      return Promise.reject(Errors.FilenameNotProvided())
    }
    filename = maybeFileURLToPath(filename)

    let resolve: (result: any) => void
    let reject: (err: Error) => void

    const ret = new Promise((res, rej) => {
      resolve = res
      reject = rej
    })
    const taskInfo = new TaskInfo(
      task,
      transferList,
      filename,
      name,
      // Completion callback: settle the promise and, when the worker is due
      // for recycling, replace it before resolving.
      (err: Error | null, result: any) => {
        this.completed++
        if (err !== null) {
          reject(err)
        }

        if (this.shouldRecycleWorker(taskInfo)) {
          this._removeWorker(taskInfo.workerInfo!)
            .then(() => this._ensureMinimumWorkers())
            .then(() => this._ensureEnoughWorkersForTaskQueue())
            .then(() => resolve(result))
            .catch(reject)
        } else {
          resolve(result)
        }
      },
      signal,
      this.publicInterface.asyncResource.asyncId(),
      channel
    )

    if (signal !== null) {
      // If the AbortSignal has an aborted property and it's truthy,
      // reject immediately.
      if ((signal as AbortSignalEventTarget).aborted) {
        return Promise.reject(new AbortError())
      }
      taskInfo.abortListener = () => {
        // Call reject() first to make sure we always reject with the AbortError
        // if the task is aborted, not with an Error from the possible
        // thread termination below.
        reject(new AbortError())

        if (taskInfo.workerInfo !== null) {
          // Already running: We cancel the Worker this is running on.
          void this._removeWorker(taskInfo.workerInfo)
          this._ensureMinimumWorkers()
        } else {
          // Not yet running: Remove it from the queue.
          this.taskQueue.remove(taskInfo)
        }
      }
      onabort(signal, taskInfo.abortListener)
    }

    // If there is a task queue, there's no point in looking for an available
    // Worker thread. Add this task to the queue, if possible.
    if (this.taskQueue.size > 0) {
      const totalCapacity = this.options.maxQueue + this.pendingCapacity()
      if (this.taskQueue.size >= totalCapacity) {
        if (this.options.maxQueue === 0) {
          return Promise.reject(Errors.NoTaskQueueAvailable())
        } else {
          return Promise.reject(Errors.TaskQueueAtLimit())
        }
      } else {
        if (this.workers.size < this.options.maxThreads) {
          this._addNewWorker()
        }
        this.taskQueue.push(taskInfo)
      }

      return ret
    }

    // Look for a Worker with a minimum number of tasks it is currently running.
    let workerInfo: WorkerInfo | null = this.workers.findAvailable()

    // If we want the ability to abort this task, use only workers that have
    // no running tasks.
    if (workerInfo !== null && workerInfo.currentUsage() > 0 && signal) {
      workerInfo = null
    }

    // If no Worker was found, or that Worker was handling another task in some
    // way, and we still have the ability to spawn new threads, do so.
    let waitingForNewWorker = false
    if (
      (workerInfo === null || workerInfo.currentUsage() > 0) &&
      this.workers.size < this.options.maxThreads
    ) {
      this._addNewWorker()
      waitingForNewWorker = true
    }

    // If no Worker is found, try to put the task into the queue.
    if (workerInfo === null) {
      if (this.options.maxQueue <= 0 && !waitingForNewWorker) {
        return Promise.reject(Errors.NoTaskQueueAvailable())
      } else {
        this.taskQueue.push(taskInfo)
      }

      return ret
    }

    const now = performance.now()
    taskInfo.started = now
    workerInfo.postTask(taskInfo)
    this._maybeDrain()

    return ret
  }

  /**
   * Decide whether the worker that just finished `taskInfo` should be torn
   * down and replaced rather than reused.
   */
  shouldRecycleWorker(taskInfo?: TaskInfo): boolean {
    // Worker could be set to recycle by pool's imperative methods
    if (taskInfo?.workerInfo?.shouldRecycle) {
      return true
    }

    // When `isolateWorkers` is enabled, remove the worker after task is finished
    if (this.options.isolateWorkers && taskInfo?.workerInfo) {
      return true
    }

    // When `maxMemoryLimitBeforeRecycle` is enabled, remove workers that have exceeded the memory limit
    if (
      !this.options.isolateWorkers &&
      this.options.maxMemoryLimitBeforeRecycle !== undefined &&
      (taskInfo?.workerInfo?.usedMemory || 0) >
        this.options.maxMemoryLimitBeforeRecycle
    ) {
      return true
    }

    return false
  }

  // Capacity of workers that are still starting up; counted as headroom when
  // deciding whether the queue is full.
  pendingCapacity(): number {
    return (
      this.workers.pendingItems.size * this.options.concurrentTasksPerWorker
    )
  }

  // Emit 'drain' on the public interface when both queues are empty.
  _maybeDrain() {
    if (this.taskQueue.size === 0 && this.skipQueue.length === 0) {
      this.publicInterface.emit('drain')
    }
  }

  /**
   * Fail every queued task, destroy every worker, and wait for all of them
   * to exit.
   */
  async destroy() {
    while (this.skipQueue.length > 0) {
      const taskInfo: TaskInfo = this.skipQueue.shift() as TaskInfo
      taskInfo.done(new Error('Terminating worker thread'))
    }
    while (this.taskQueue.size > 0) {
      const taskInfo: TaskInfo = this.taskQueue.shift() as TaskInfo
      taskInfo.done(new Error('Terminating worker thread'))
    }

    const exitEvents: Promise<any[]>[] = []
    while (this.workers.size > 0) {
      const [workerInfo] = this.workers
      // @ts-expect-error -- TODO Fix
      exitEvents.push(once(workerInfo.worker, 'exit'))
      // @ts-expect-error -- TODO Fix
      void this._removeWorker(workerInfo)
    }

    await Promise.all(exitEvents)
  }

  /**
   * Replace the pool's workers: idle ones are destroyed right away (and
   * awaited), busy ones are flagged so shouldRecycleWorker replaces them
   * after their current task. Optionally switches the runtime first.
   */
  async recycleWorkers(options: Pick<Options, 'runtime'> = {}) {
    const runtimeChanged =
      options?.runtime && options.runtime !== this.options.runtime

    if (options?.runtime) {
      this.options.runtime = options.runtime
    }

    // Worker's are automatically recycled when isolateWorkers is enabled.
    // Idle workers still need to be recycled if runtime changed
    if (this.options.isolateWorkers && !runtimeChanged) {
      return
    }

    const exitEvents: Promise<any[]>[] = []

    // NOTE(review): filter() is used purely for iteration here; its return
    // value is discarded.
    Array.from(this.workers).filter((workerInfo) => {
      // Remove idle workers
      if (workerInfo.currentUsage() === 0) {
        // @ts-expect-error -- TODO Fix
        exitEvents.push(once(workerInfo.worker, 'exit'))
        void this._removeWorker(workerInfo)
      }
      // Mark on-going workers for recycling.
      // Note that we don't need to wait for these ones to finish
      // as pool.shouldRecycleWorker will do it once task has finished
      else {
        workerInfo.shouldRecycle = true
      }
    })

    await Promise.all(exitEvents)

    this._ensureMinimumWorkers()
  }
}

/**
 * Public pool API. A thin facade over ThreadPool that normalizes options,
 * validates min/max thread bounds, and exposes pool state via getters.
 * Extends EventEmitterAsyncResource so task callbacks keep correct
 * async-hooks context.
 */
class Tinypool extends EventEmitterAsyncResource {
  #pool: ThreadPool

  constructor(options: Options = {}) {
    // convert fractional option values to int: a value in (0, 1) is treated
    // as a fraction of the available CPU count (minimum 1 thread).
    // This must happen before super()/ThreadPool see the options.
    if (
      options.minThreads !== undefined &&
      options.minThreads > 0 &&
      options.minThreads < 1
    ) {
      options.minThreads = Math.max(
        1,
        Math.floor(options.minThreads * cpuCount)
      )
    }
    if (
      options.maxThreads !== undefined &&
      options.maxThreads > 0 &&
      options.maxThreads < 1
    ) {
      options.maxThreads = Math.max(
        1,
        Math.floor(options.maxThreads * cpuCount)
      )
    }

    super({ ...options, name: 'Tinypool' })

    // Only reject when BOTH bounds were given explicitly and conflict;
    // a lone bound is reconciled inside ThreadPool instead.
    if (
      options.minThreads !== undefined &&
      options.maxThreads !== undefined &&
      options.minThreads > options.maxThreads
    ) {
      throw new RangeError(
        'options.minThreads and options.maxThreads must not conflict'
      )
    }

    this.#pool = new ThreadPool(this, options)
  }

  /**
   * Run a single task on the pool. Resolves with the task's result.
   * NOTE(review): `runtime` is forwarded here but ThreadPool.runTask does not
   * read it — confirm whether per-task runtime switching is intended.
   */
  run(task: any, options: RunOptions = kDefaultRunOptions) {
    const { transferList, filename, name, signal, runtime, channel } = options

    return this.#pool.runTask(task, {
      transferList,
      filename,
      name,
      signal,
      runtime,
      channel,
    })
  }

  /** Destroy all workers and mark this async resource as destroyed. */
  async destroy() {
    await this.#pool.destroy()
    this.emitDestroy()
  }

  /** The pool's fully-resolved options. */
  get options(): FilledOptions {
    return this.#pool.options
  }

  /** The underlying runtime workers (threads or child processes). */
  get threads(): TinypoolWorker[] {
    const ret: TinypoolWorker[] = []
    for (const workerInfo of this.#pool.workers) {
      ret.push(workerInfo.worker)
    }
    return ret
  }

  /** Queued tasks minus the capacity of workers that are still starting. */
  get queueSize(): number {
    const pool = this.#pool
    return Math.max(pool.taskQueue.size - pool.pendingCapacity(), 0)
  }

  /** Cancel all tasks that are still waiting in the queue. */
  cancelPendingTasks() {
    const pool = this.#pool
    pool.taskQueue.cancel()
  }

  /** Replace current workers; optionally switch the runtime at the same time. */
  async recycleWorkers(options: Pick<Options, 'runtime'> = {}) {
    await this.#pool.recycleWorkers(options)
  }

  /** Number of tasks that have finished (successfully or not). */
  get completed(): number {
    return this.#pool.completed
  }

  /** Milliseconds elapsed since the pool was created. */
  get duration(): number {
    return performance.now() - this.#pool.start
  }

  /** True when the current process/thread is a Tinypool worker. */
  static get isWorkerThread(): boolean {
    return process.__tinypool_state__?.isWorkerThread || false
  }

  /** The user-supplied workerData, available inside workers. */
  static get workerData(): any {
    return process.__tinypool_state__?.workerData || undefined
  }

  /** Package version, read from package.json on every access. */
  static get version(): string {
    const { version } = JSON.parse(
      readFileSync(join(__dirname, '../package.json'), 'utf-8')
    ) as typeof import('../package.json')
    return version
  }

  /**
   * Mark a value as movable (transferred instead of cloned when posted to a
   * worker), wrapping non-transferable objects as needed.
   */
  static move(
    val:
      | Transferable
      | TransferListItem
      | ArrayBufferView
      | ArrayBuffer
      | MessagePort
  ) {
    if (val != null && typeof val === 'object' && typeof val !== 'function') {
      if (!isTransferable(val)) {
        if (types.isArrayBufferView(val)) {
          val = new ArrayBufferViewTransferable(val as ArrayBufferView)
        } else {
          val = new DirectlyTransferable(val)
        }
      }
      markMovable(val)
    }
    return val
  }

  // Symbols for implementing custom transferable/queue-option protocols.
  static get transferableSymbol() {
    return kTransferable
  }

  static get valueSymbol() {
    return kValue
  }

  static get queueOptionsSymbol() {
    return kQueueOptions
  }
}

const _workerId = process.__tinypool_state__?.workerId

export * from './common'
export { Tinypool, Options, _workerId as workerId }
export default Tinypool


================================================
FILE: src/runtime/process-worker.ts
================================================
import { type ChildProcess, fork } from 'node:child_process'
import { MessagePort, type TransferListItem } from 'node:worker_threads'
import { fileURLToPath } from 'node:url'
import {
  type TinypoolChannel,
  type TinypoolWorker,
  type TinypoolWorkerMessage,
} from '../common'

// Marker attached to every message ProcessWorker exchanges with the child so
// pool-internal messages can be told apart from user channel messages.
const __tinypool_worker_message__ = true
// How long (ms) to wait after a graceful kill() before escalating to SIGKILL.
const SIGKILL_TIMEOUT = 1000

/**
 * TinypoolWorker implementation backed by a forked child process.
 * Pool/port messages are tunneled over the process IPC channel, tagged with
 * `__tinypool_worker_message__` and a `source` field so they can be routed
 * back to either the pool or the MessagePort on this side.
 */
export default class ProcessWorker implements TinypoolWorker {
  name = 'ProcessWorker'
  runtime = 'child_process'
  process!: ChildProcess
  // Uses the child's PID as the thread id.
  threadId!: number
  port?: MessagePort
  channel?: TinypoolChannel
  waitForExit!: Promise<void>
  isTerminating = false

  initialize(options: Parameters<TinypoolWorker['initialize']>[0]) {
    // Forks the sibling entry script; the worker id is passed via env since
    // child processes cannot receive workerData directly.
    this.process = fork(
      fileURLToPath(import.meta.url + '/../entry/process.js'),
      options.argv,
      {
        ...options,
        stdio: 'pipe',
        env: {
          ...options.env,
          TINYPOOL_WORKER_ID: options.workerData[0].workerId.toString(),
        },
      }
    )

    // Each piped child adds a listener on the parent's stdio; raise the limit
    // first so Node does not warn about possible listener leaks.
    process.stdout.setMaxListeners(1 + process.stdout.getMaxListeners())
    process.stderr.setMaxListeners(1 + process.stderr.getMaxListeners())
    this.process.stdout?.pipe(process.stdout)
    this.process.stderr?.pipe(process.stderr)

    this.threadId = this.process.pid!

    this.process.on('exit', this.onUnexpectedExit)
    this.waitForExit = new Promise((r) => this.process.on('exit', r))
  }

  // Any exit that happens before terminate() is treated as a worker error.
  onUnexpectedExit = () => {
    this.process.emit('error', new Error('Worker exited unexpectedly'))
  }

  /**
   * Gracefully kill the child; escalate to SIGKILL after SIGKILL_TIMEOUT if
   * it has not exited. Cleans up stdio pipes, the port, and the channel.
   */
  async terminate() {
    this.isTerminating = true
    this.process.off('exit', this.onUnexpectedExit)

    const sigkillTimeout = setTimeout(
      () => this.process.kill('SIGKILL'),
      SIGKILL_TIMEOUT
    )

    this.process.kill()
    await this.waitForExit

    this.process.stdout?.unpipe(process.stdout)
    this.process.stderr?.unpipe(process.stderr)
    this.port?.close()
    this.channel?.onClose?.()
    clearTimeout(sigkillTimeout)
  }

  /** Attach a user channel; its messages are forwarded to the child process. */
  setChannel(channel: TinypoolChannel) {
    // Previous channel exists in non-isolated runs
    if (this.channel && this.channel !== channel) {
      this.channel.onClose?.()
    }

    this.channel = channel

    // Mirror channel's messages to process
    this.channel.onMessage?.((message: any) => {
      this.send(message)
    })
  }

  // Sends to the child unless we are already terminating (the IPC channel may
  // be gone by then).
  private send(message: Parameters<NonNullable<(typeof process)['send']>>[0]) {
    if (!this.isTerminating) {
      this.process.send(message)
    }
  }

  /**
   * Send a pool message to the child. A MessagePort in the transfer list is
   * captured (ports cannot be transferred to a child process) and its
   * messages are mirrored over IPC instead.
   */
  postMessage(message: any, transferListItem?: Readonly<TransferListItem[]>) {
    transferListItem?.forEach((item) => {
      if (item instanceof MessagePort) {
        this.port = item
        this.port.start()
      }
    })

    // Mirror port's messages to process
    if (this.port) {
      this.port.on('message', (message) =>
        this.send(<TinypoolWorkerMessage<'port'>>{
          ...message,
          source: 'port',
          __tinypool_worker_message__,
        })
      )
    }

    return this.send(<TinypoolWorkerMessage<'pool'>>{
      ...message,
      source: 'pool',
      __tinypool_worker_message__,
    })
  }

  /**
   * Listen to child events, de-multiplexing IPC messages: errors go straight
   * to the callback, untagged messages go to the user channel, and tagged
   * messages are routed by their `source` to the pool callback or the port.
   */
  on(event: string, callback: (...args: any[]) => void) {
    return this.process.on(event, (data: TinypoolWorkerMessage) => {
      // All errors should be forwarded to the pool
      if (event === 'error') {
        return callback(data)
      }

      if (!data || !data.__tinypool_worker_message__) {
        return this.channel?.postMessage?.(data)
      }

      if (data.source === 'pool') {
        callback(data)
      } else if (data.source === 'port') {
        this.port!.postMessage(data)
      }
    })
  }

  once(event: string, callback: (...args: any[]) => void) {
    return this.process.once(event, callback)
  }

  emit(event: string, ...data: any[]) {
    return this.process.emit(event, ...data)
  }

  ref() {
    return this.process.ref()
  }

  /** Unref the child and every handle that would keep the parent alive. */
  unref() {
    this.port?.unref()

    // The forked child_process adds event listener on `process.on('message)`.
    // This requires manual unreffing of its channel.
    this.process.channel?.unref?.()

    if (hasUnref(this.process.stdout)) {
      this.process.stdout.unref()
    }

    if (hasUnref(this.process.stderr)) {
      this.process.stderr.unref()
    }

    return this.process.unref()
  }
}

// Type guard for stdio streams whose `unref` is untyped in Node's typings:
// true only when the value is a non-null object with a callable `unref`.
function hasUnref(stream: null | object): stream is { unref: () => void } {
  if (stream == null) {
    return false
  }
  return 'unref' in stream && typeof stream.unref === 'function'
}


================================================
FILE: src/runtime/thread-worker.ts
================================================
import { fileURLToPath } from 'node:url'
import { type TransferListItem, Worker } from 'node:worker_threads'
import { type TinypoolWorker, type TinypoolChannel } from '../common'

/**
 * TinypoolWorker implementation backed by a worker_threads Worker.
 * Most methods are direct pass-throughs to the underlying thread.
 */
export default class ThreadWorker implements TinypoolWorker {
  name = 'ThreadWorker'
  runtime = 'worker_threads'
  thread!: Worker
  threadId!: number
  channel?: TinypoolChannel

  initialize(options: Parameters<TinypoolWorker['initialize']>[0]) {
    // Spawns the sibling entry script resolved relative to this module.
    this.thread = new Worker(
      fileURLToPath(import.meta.url + '/../entry/worker.js'),
      options
    )
    this.threadId = this.thread.threadId
  }

  /** Terminate the thread, then close the attached channel (if any). */
  async terminate() {
    const output = await this.thread.terminate()

    this.channel?.onClose?.()

    return output
  }

  postMessage(message: any, transferListItem?: Readonly<TransferListItem[]>) {
    return this.thread.postMessage(message, transferListItem)
  }

  on(event: string, callback: (...args: any[]) => void) {
    return this.thread.on(event, callback)
  }

  once(event: string, callback: (...args: any[]) => void) {
    return this.thread.once(event, callback)
  }

  emit(event: string, ...data: any[]) {
    return this.thread.emit(event, ...data)
  }

  ref() {
    return this.thread.ref()
  }

  unref() {
    return this.thread.unref()
  }

  /**
   * Attach a user channel. Message callbacks are rejected because thread
   * workers communicate via transferred MessagePorts, not the channel; only
   * the onClose lifecycle hook is honored.
   */
  setChannel(channel: TinypoolChannel) {
    if (channel.onMessage) {
      throw new Error(
        "{ runtime: 'worker_threads' } doesn't support channel.onMessage. Use transferListItem for listening to messages instead."
      )
    }

    if (channel.postMessage) {
      throw new Error(
        "{ runtime: 'worker_threads' } doesn't support channel.postMessage. Use transferListItem for sending to messages instead."
      )
    }

    // Previous channel exists in non-isolated runs
    if (this.channel && this.channel !== channel) {
      this.channel.onClose?.()
    }

    this.channel = channel
  }
}


================================================
FILE: src/utils.ts
================================================
/**
 * Resolve the stream console output should go to: Node maps process.stdout
 * to the private console._stdout, which is preferred when present.
 */
export function stdout(): NodeJS.WriteStream | undefined {
  const consoleStream = (console as { _stdout?: NodeJS.WriteStream })._stdout
  return consoleStream || process.stdout || undefined
}

/**
 * Resolve the stream console errors should go to: Node maps process.stderr
 * to the private console._stderr, which is preferred when present.
 */
export function stderr(): NodeJS.WriteStream | undefined {
  const consoleStream = (console as { _stderr?: NodeJS.WriteStream })._stderr
  return consoleStream || process.stderr || undefined
}


================================================
FILE: test/async-context.test.ts
================================================
import { createHook, executionAsyncId } from 'node:async_hooks'
import { Tinypool } from 'tinypool'
import { dirname, resolve } from 'node:path'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))

// Verifies that exactly one 'Tinypool.Task' async resource is created per
// task, and that its before/after/promiseResolve hooks each fire once.
test('postTask() calls the correct async hooks', async () => {
  let taskId: number
  let initCalls = 0
  let beforeCalls = 0
  let afterCalls = 0
  let resolveCalls = 0

  const hook = createHook({
    // Capture the async id of the task resource when it is created.
    init(id, type) {
      if (type === 'Tinypool.Task') {
        initCalls++
        taskId = id
      }
    },
    before(id) {
      if (id === taskId) beforeCalls++
    },
    after(id) {
      if (id === taskId) afterCalls++
    },
    promiseResolve() {
      if (executionAsyncId() === taskId) resolveCalls++
    },
  })
  hook.enable()

  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })

  await pool.run('42')

  // Disable before asserting so the expect() calls don't add hook noise.
  hook.disable()
  expect(initCalls).toBe(1)
  expect(beforeCalls).toBe(1)
  expect(afterCalls).toBe(1)
  expect(resolveCalls).toBe(1)
})


================================================
FILE: test/atomic.test.ts
================================================
import Tinypool from 'tinypool'
import { dirname, resolve } from 'node:path'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))

// Exercises the Atomics-based fast path: workers post results while the main
// thread is deliberately behind on 'message' events, forcing the shared
// response-count check to pick them up.
test('coverage test for Atomics optimization', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/notify-then-sleep-or.js'),
    minThreads: 2,
    maxThreads: 2,
    concurrentTasksPerWorker: 2,
  })

  const tasks = []
  let v: number

  // Post 4 tasks, and wait for all of them to be ready.
  // Each task sets its own bit in i32array[0] and then waits for it to clear.
  const i32array = new Int32Array(new SharedArrayBuffer(4))
  for (let index = 0; index < 4; index++) {
    tasks.push(pool.run({ i32array, index }))
  }

  // Wait for 2 tasks to enter 'wait' state.
  do {
    v = Atomics.load(i32array, 0)
    if (popcount8(v) >= 2) break
    Atomics.wait(i32array, 0, v)
  } while (true) // eslint-disable-line no-constant-condition -- intentional

  // The check above could also be !== 2 but it's hard to get things right
  // sometimes and this gives us a nice assertion. Basically, at this point
  // exactly 2 tasks should be in Atomics.wait() state.
  expect(popcount8(v)).toBe(2)
  // Wake both tasks up as simultaneously as possible. The other 2 tasks should
  // then start executing.
  Atomics.store(i32array, 0, 0)
  Atomics.notify(i32array, 0, Infinity)

  // Wait for the other 2 tasks to enter 'wait' state.
  do {
    v = Atomics.load(i32array, 0)
    if (popcount8(v) >= 2) break
    Atomics.wait(i32array, 0, v)
  } while (true) // eslint-disable-line no-constant-condition -- intentional

  // At this point, the first two tasks are definitely finished and have
  // definitely posted results back to the main thread, and the main thread
  // has definitely not received them yet, meaning that the Atomics check will
  // be used. Making sure that that works is the point of this test.

  // Wake up the remaining 2 tasks in order to make sure that the test finishes.
  // Do the same consistency check beforehand as above.
  expect(popcount8(v)).toBe(2)
  Atomics.store(i32array, 0, 0)
  Atomics.notify(i32array, 0, Infinity)

  await Promise.all(tasks)
})

// Inefficient but straightforward 8-bit popcount: counts the set bits in the
// low byte of `v` by recursively splitting it into halves.
function popcount8(v: number): number {
  // Only the low 8 bits are counted.
  v &= 0xff
  // BUG FIX: the recursion masks were written as hex-looking literals
  // (0xb00001111 etc.) instead of binary 0b... literals. The hex forms kept
  // the wrong bits: e.g. popcount8(0b0110) returned 1 and popcount8(0b10000)
  // recursed without bound. The masks below are the intended binary literals.
  if (v & 0b11110000) return popcount8(v >>> 4) + popcount8(v & 0b00001111)
  if (v & 0b00001100) return popcount8(v >>> 2) + popcount8(v & 0b00000011)
  if (v & 0b00000010) return popcount8(v >>> 1) + popcount8(v & 0b00000001)
  return v
}

// Floods a 2-thread pool with 10000 tasks to ensure the synchronous
// message-draining path does not recurse without bound.
test('avoids unbounded recursion', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/simple-isworkerthread.js'),
    minThreads: 2,
    maxThreads: 2,
  })

  const tasks = []
  for (let i = 1; i <= 10000; i++) {
    tasks.push(pool.run(null))
  }

  await Promise.all(tasks)
})


================================================
FILE: test/fixtures/child_process-communication.mjs
================================================
// Fixture for pool <-> child_process channel communication: announces itself
// via process.send, then echoes the first parent message back over IPC and
// resolves the task with a matching payload.
export default async function run(task) {
  let resolve = () => {}
  const promise = new Promise((r) => (resolve = r))

  process.send('Child process started')

  process.on('message', (message) => {
    process.send({ received: message, response: 'Hello from worker' })
    resolve({ received: task, response: 'Hello from worker' })
  })

  return promise
}


================================================
FILE: test/fixtures/esm-export.mjs
================================================
// Fixture: evaluates the given code string with direct eval (the .mjs twin of
// fixtures/eval.js).
export default function (code) {
  return eval(code)
}


================================================
FILE: test/fixtures/eval.js
================================================
// Fixture: evaluates the given code string with direct eval and returns the
// result.
export default function (code) {
  return eval(code)
}


================================================
FILE: test/fixtures/isolated.js
================================================
// Module-level counter: isolated workers re-import the module and start from
// 0 again, while reused workers keep incrementing across calls.
let count = 0

export default () => count++


================================================
FILE: test/fixtures/leak-memory.js
================================================
/** Enable to see memory leak logging */
const logOutput = false

// eslint-disable-next-line prefer-const -- intentional
export let leaks = []

/**
 * Leak some memory to test memory limit usage.
 * The argument `bytes` is not 100% accurate of the leaked bytes but good enough.
 */
export default function run(bytes) {
  const heapBefore = process.memoryUsage().heapUsed

  // Keep the allocations reachable through the exported array so they cannot
  // be garbage-collected: `bytes` buffers of 1 KiB each.
  for (let i = 0; i < bytes; i++) {
    leaks.push(new SharedArrayBuffer(1024))
  }

  const heapAfter = process.memoryUsage().heapUsed
  const diff = heapAfter - heapBefore

  if (logOutput) {
    console.log(`Leaked: ${diff}. Heap used: ${process.memoryUsage().heapUsed}`)
  }
}


================================================
FILE: test/fixtures/move.js
================================================
import Tinypool from '../../dist/index.js'
import assert from 'node:assert'
import { types } from 'node:util'

// Fixture for Tinypool.move(): checks that a value moved INTO the worker
// arrives as an ArrayBuffer, and returns a freshly moved buffer back.
export default function (moved) {
  if (moved !== undefined) {
    assert(types.isAnyArrayBuffer(moved))
  }
  return Tinypool.move(new ArrayBuffer(10))
}


================================================
FILE: test/fixtures/multiple.js
================================================
'use strict'

// Fixture exposing multiple named entry points, used to exercise running
// tasks by export name. `a` doubles as the default export.
export function a() {
  return 'a'
}

export function b() {
  return 'b'
}

export default a


================================================
FILE: test/fixtures/nested-pool.mjs
================================================
import { cpus } from 'node:os'
import { Tinypool } from 'tinypool'

/**
 * Spawns a nested pool from inside a worker, pointing back at this same
 * module, then kicks off a recycle without awaiting it. The test only
 * checks that this does not crash or hang the parent pool.
 */
export default async function nestedPool() {
  const pool = new Tinypool({
    // `import.meta.url` is already an absolute file URL string; wrapping it
    // in `new URL(import.meta.url, import.meta.url).href` was a redundant
    // self-resolution that produced the same value.
    filename: import.meta.url,
    runtime: 'child_process',
    isolateWorkers: true,
    minThreads: cpus().length - 1,
    maxThreads: cpus().length - 1,
  })

  // Yield once, then fire-and-forget the recycle.
  await Promise.resolve()
  void pool.recycleWorkers()
}

// No-op entry point executed by the nested pool's workers.
export function entrypoint() {}


================================================
FILE: test/fixtures/notify-then-sleep-or.js
================================================
// Set the index-th bit in i32array[0], then wait for it to be un-set again.
export default function ({ i32array, index }) {
  const mask = 1 << index

  // Publish our bit and wake any thread waiting on the word.
  Atomics.or(i32array, 0, mask)
  Atomics.notify(i32array, 0, Infinity)

  // Sleep until another thread clears our bit. Re-reading the word before
  // each wait avoids missing a change between the check and the wait.
  for (;;) {
    const current = Atomics.load(i32array, 0)
    if ((current & mask) === 0) {
      return
    }
    Atomics.wait(i32array, 0, current)
  }
}


================================================
FILE: test/fixtures/resource-limits.js
================================================
'use strict'

// Grows a self-referencing array without bound until the worker exhausts its
// JS heap; used to verify that `resourceLimits` terminates the worker.
export default () => {
  const hog = []
  for (;;) {
    hog.push([hog])
  }
}


================================================
FILE: test/fixtures/simple-isworkerthread.js
================================================
import Tinypool from '../../dist/index.js'
import assert from 'node:assert'

// Fails at import time unless this module is being loaded inside a worker
// thread — the test asserts the import itself succeeds.
assert.strictEqual(Tinypool.isWorkerThread, true)

// Trivial task proving the worker executed.
export default () => 'done'


================================================
FILE: test/fixtures/simple-workerdata.js
================================================
import Tinypool from '../../dist/index.js'
import assert from 'node:assert'

// Fails at import time unless workerData was forwarded to the worker as 'ABC'.
assert.strictEqual(Tinypool.workerData, 'ABC')

// Trivial task proving the worker executed.
export default () => 'done'


================================================
FILE: test/fixtures/sleep.js
================================================
import { promisify } from 'node:util'

// Promise-returning setTimeout.
const sleep = promisify(setTimeout)

// One shared 32-bit slot that persists for the lifetime of this worker.
const buf = new Uint32Array(new SharedArrayBuffer(4))

/**
 * Waits `time` ms (default 100), then swaps `a` into the shared slot and
 * returns whatever value the slot held before.
 */
export default async function ({ time = 100, a }) {
  await sleep(time)
  const previous = Atomics.exchange(buf, 0, a)
  return previous
}


================================================
FILE: test/fixtures/stdio.mjs
================================================
// Writes fixed markers to both stdio streams so tests can assert that
// worker output is forwarded to the parent process.
export default function run() {
  const output = [
    [process.stdout, 'Worker message'],
    [process.stderr, 'Worker error'],
  ]
  for (const [stream, text] of output) {
    stream.write(text)
  }
}


================================================
FILE: test/fixtures/teardown.mjs
================================================
import { setTimeout } from 'node:timers/promises'

// Number of tasks this worker has executed so far.
let state = 0

/** @type {import("node:worker_threads").MessagePort } */
let port

// Task entry point: remembers the first MessagePort it is handed and
// returns a label containing the execution count.
export default function task(options) {
  if (!port) {
    port = options?.port
  }
  state += 1

  return `Output of task #${state}`
}

// Teardown hook: after a short delay, reports the final count over the port
// (a no-op when no port was ever provided).
export async function namedTeardown() {
  await setTimeout(50)

  port?.postMessage(`Teardown of task #${state}`)
}


================================================
FILE: test/fixtures/wait-for-notify.js
================================================
// Parks this thread until slot 0 of the shared array changes away from 0,
// then writes -1 into the slot and wakes every other waiter. The exact
// wait -> store -> notify order is what the atomic tests rely on.
export default function (i32array) {
  Atomics.wait(i32array, 0, 0)
  Atomics.store(i32array, 0, -1)
  Atomics.notify(i32array, 0, Infinity)
}


================================================
FILE: test/fixtures/wait-for-others.js
================================================
import { threadId } from 'node:worker_threads'

// Barrier: bumps the shared arrival counter, then blocks until `n`
// participants have arrived. Returns this worker's threadId so tests can
// compare which threads served which tasks.
export default function ([i32array, n]) {
  Atomics.add(i32array, 0, 1)
  Atomics.notify(i32array, 0, Infinity)

  for (;;) {
    const seen = Atomics.load(i32array, 0)
    if (seen >= n) {
      break
    }
    // Re-check after every wakeup; wait only if the count is unchanged.
    Atomics.wait(i32array, 0, seen)
  }

  return threadId
}


================================================
FILE: test/fixtures/workerId.js
================================================
import { workerId } from '../../dist/index.js'

// Optionally sleeps 300 ms (to keep the worker busy), then reports this
// worker's tinypool workerId back to the test.
export default async ({ slow }) => {
  if (slow) {
    await new Promise((resolve) => setTimeout(resolve, 300))
  }

  return workerId
}


================================================
FILE: test/globals.test.ts
================================================
import * as path from 'node:path'
import { fileURLToPath } from 'node:url'
import { Tinypool } from 'tinypool'

const __dirname = path.dirname(fileURLToPath(import.meta.url))

// Run against both runtimes: the worker evaluates code that replaces the
// global `process` object, and the pool must still resolve the task rather
// than hang on the fake exit().
describe.each(['worker_threads', 'child_process'] as const)('%s', (runtime) => {
  test("doesn't hang when process is overwritten", async () => {
    const pool = createPool({ runtime })

    // The snippet swaps `globalThis.process` for a stub whose exit() resolves
    // the promise; getting that value back proves Tinypool does not depend on
    // the worker-side `process` global staying intact.
    const result = await pool.run(`
    (async () => {
      return new Promise(resolve => {
        globalThis.process = { exit: resolve };
        process.exit("exit() from overwritten process");
      });
    })();
    `)
    expect(result).toBe('exit() from overwritten process')
  })
})

// Builds a single-worker pool around the eval fixture; callers may override
// any option (e.g. the runtime).
function createPool(options: Partial<Tinypool['options']>) {
  return new Tinypool({
    filename: path.resolve(__dirname, 'fixtures/eval.js'),
    minThreads: 1,
    maxThreads: 1,
    ...options,
  })
}


================================================
FILE: test/idle-timeout.test.ts
================================================
import { promisify } from 'node:util'
import { dirname, resolve } from 'node:path'
import { Tinypool } from 'tinypool'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))
const delay = promisify(setTimeout)
test('idle timeout will let go of threads early', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/wait-for-others.js'),
    idleTimeout: 500,
    minThreads: 1,
    maxThreads: 2,
  })

  // Only minThreads workers exist before any work is queued.
  expect(pool.threads.length).toBe(1)
  const buffer = new Int32Array(new SharedArrayBuffer(4))

  // Two simultaneous barrier tasks force the pool up to maxThreads.
  const firstTasks = [pool.run([buffer, 2]), pool.run([buffer, 2])]
  expect(pool.threads.length).toBe(2)

  const earlyThreadIds = await Promise.all(firstTasks)
  expect(pool.threads.length).toBe(2)

  // Wait well past idleTimeout (500 ms): the extra thread should exit,
  // shrinking the pool back to the minThreads baseline.
  await delay(2000)
  expect(pool.threads.length).toBe(1)

  const secondTasks = [pool.run([buffer, 4]), pool.run([buffer, 4])]
  expect(pool.threads.length).toBe(2)

  const lateThreadIds = await Promise.all(secondTasks)

  // One thread should have been idle in between and exited, one should have
  // been reused.
  expect(earlyThreadIds.length).toBe(2)
  expect(lateThreadIds.length).toBe(2)
  expect(new Set([...earlyThreadIds, ...lateThreadIds]).size).toBe(3)
})


================================================
FILE: test/isolation.test.ts
================================================
import { dirname, resolve } from 'node:path'
import { Tinypool } from 'tinypool'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))

// Exercise worker recycling on both runtimes.
describe.each(['worker_threads', 'child_process'] as const)('%s', (runtime) => {
  test('idle workers can be recycled', async () => {
    const pool = new Tinypool({
      runtime,
      filename: resolve(__dirname, 'fixtures/sleep.js'),
      minThreads: 4,
      maxThreads: 4,
      isolateWorkers: false,
    })

    // Sorted ids make the set comparisons order-independent.
    function getThreadIds() {
      return pool.threads.map((thread) => thread.threadId).sort((a, b) => a - b)
    }

    expect(pool.threads).toHaveLength(4)
    const initialThreadIds = getThreadIds()

    // Without isolation, running tasks must not replace the workers.
    await Promise.all(times(4)(() => pool.run({})))
    expect(getThreadIds()).toStrictEqual(initialThreadIds)

    // Recycling replaces every worker while keeping the pool size.
    await pool.recycleWorkers()
    expect(pool.threads).toHaveLength(4)

    const newThreadIds = getThreadIds()
    initialThreadIds.forEach((id) => expect(newThreadIds).not.toContain(id))

    // The replacement workers stay stable across further runs.
    await Promise.all(times(4)(() => pool.run({})))
    initialThreadIds.forEach((id) => expect(newThreadIds).not.toContain(id))
    expect(getThreadIds()).toStrictEqual(newThreadIds)
  })

  test('running workers can recycle after task execution finishes', async () => {
    const pool = new Tinypool({
      runtime,
      filename: resolve(__dirname, 'fixtures/sleep.js'),
      minThreads: 4,
      maxThreads: 4,
      isolateWorkers: false,
    })

    function getThreadIds() {
      return pool.threads.map((thread) => thread.threadId).sort((a, b) => a - b)
    }

    expect(pool.threads).toHaveLength(4)
    const initialThreadIds = getThreadIds()

    // Two quick tasks and two slow ones, so half of the workers are still
    // busy when recycleWorkers() is called below.
    const tasks = [
      ...times(2)(() => pool.run({ time: 1 })),
      ...times(2)(() => pool.run({ time: 2000 })),
    ]

    // Wait for first two tasks to finish
    await Promise.all(tasks.slice(0, 2))

    await pool.recycleWorkers()
    const threadIds = getThreadIds()

    // Idle workers should have been recycled immediately
    // Running workers should not have recycled yet
    expect(intersection(threadIds, initialThreadIds)).toHaveLength(2)

    await Promise.all(tasks)

    // All workers should have recycled now
    const newThreadIds = getThreadIds()
    initialThreadIds.forEach((id) => expect(newThreadIds).not.toContain(id))
  })
})

/**
 * Creates a runner that invokes `fn` `count` times and collects the results
 * in call order.
 */
function times(count: number) {
  return function run<T>(fn: () => T): T[] {
    const results: T[] = []
    for (let i = 0; i < count; i++) {
      results.push(fn())
    }
    return results
  }
}

/** Returns the elements of `a` that also appear in `b`, preserving `a`'s order. */
function intersection<T>(a: T[], b: T[]) {
  const present = new Set(b)
  return a.filter((value) => present.has(value))
}


================================================
FILE: test/move.test.ts
================================================
import { Tinypool, isMovable, markMovable, isTransferable } from 'tinypool'
import { types } from 'node:util'
import { MessageChannel, MessagePort } from 'node:worker_threads'
import { dirname, resolve } from 'node:path'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))

// Tinypool's transfer symbols, re-cast so tests can index objects with them.
const transferableSymbol = Tinypool.transferableSymbol as never
const valueSymbol = Tinypool.valueSymbol as never

test('Marking an object as movable works as expected', async () => {
  // An object is "transferable" when it exposes both Tinypool symbols.
  const obj: any = {
    get [transferableSymbol](): object {
      return {}
    },
    get [valueSymbol](): object {
      return {}
    },
  }
  expect(isTransferable(obj)).toBe(true)
  expect(!isMovable(obj)).toBe(true) // It's not movable initially
  markMovable(obj)
  expect(isMovable(obj)).toBe(true) // It is movable now
})

test('Marking primitives and null works as expected', async () => {
  // move() passes primitives and null through untouched.
  expect(Tinypool.move(null!)).toBe(null)
  expect(Tinypool.move(1 as any)).toBe(1)
  expect(Tinypool.move(false as any)).toBe(false)
  expect(Tinypool.move('test' as any)).toBe('test')
})

test('Using Tinypool.move() returns a movable object', async () => {
  const obj: any = {
    get [transferableSymbol](): object {
      return {}
    },
    get [valueSymbol](): object {
      return {}
    },
  }
  expect(!isMovable(obj)).toBe(true) // It's not movable initially
  const movable = Tinypool.move(obj)
  expect(isMovable(movable)).toBe(true) // It is movable now
})

test('Using ArrayBuffer works as expected', async () => {
  // For a raw ArrayBuffer the buffer itself is both the value and the
  // transferable.
  const ab = new ArrayBuffer(5)
  const movable = Tinypool.move(ab)
  expect(isMovable(movable)).toBe(true)
  expect(types.isAnyArrayBuffer(movable[valueSymbol])).toBe(true)
  expect(types.isAnyArrayBuffer(movable[transferableSymbol])).toBe(true)
  expect(movable[transferableSymbol]).toEqual(ab)
})

test('Using TypedArray works as expected', async () => {
  // For a view, the value is the view itself but the transferable is the
  // underlying buffer.
  const ab = new Uint8Array(5)
  const movable = Tinypool.move(ab)
  expect(isMovable(movable)).toBe(true)
  expect(types.isArrayBufferView(movable[valueSymbol])).toBe(true)
  expect(types.isAnyArrayBuffer(movable[transferableSymbol])).toBe(true)
  expect(movable[transferableSymbol]).toEqual(ab.buffer)
})

test('Using MessagePort works as expected', async () => {
  // A MessagePort is transferred as-is: it is both value and transferable.
  const mc = new MessageChannel()
  const movable = Tinypool.move(mc.port1)
  expect(isMovable(movable)).toBe(true)
  expect((movable[valueSymbol] as unknown) instanceof MessagePort).toBe(true)
  expect((movable[transferableSymbol] as unknown) instanceof MessagePort).toBe(
    true
  )
  expect(movable[transferableSymbol]).toEqual(mc.port1)
})

test('Moving works', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/move.js'),
  })

  {
    // Implicit transfer: the movable carries its own transferList entry.
    const ab = new ArrayBuffer(10)
    const ret = await pool.run(Tinypool.move(ab))
    expect(ab.byteLength).toBe(0) // It was moved
    expect(types.isAnyArrayBuffer(ret)).toBe(true)
  }

  {
    // Test with empty transferList
    const ab = new ArrayBuffer(10)
    const ret = await pool.run(Tinypool.move(ab), { transferList: [] })
    expect(ab.byteLength).toBe(0) // It was moved
    expect(types.isAnyArrayBuffer(ret)).toBe(true)
  }

  {
    // Test without a transferList (relies on the movable's own marking)
    const ab = new ArrayBuffer(10)
    const ret = await pool.run(Tinypool.move(ab))
    expect(ab.byteLength).toBe(0) // It was moved
    expect(types.isAnyArrayBuffer(ret)).toBe(true)
  }

  {
    // Repeat the empty-transferList case to check the behavior is stable
    const ab = new ArrayBuffer(10)
    const ret = await pool.run(Tinypool.move(ab), { transferList: [] })
    expect(ab.byteLength).toBe(0) // It was moved
    expect(types.isAnyArrayBuffer(ret)).toBe(true)
  }
})


================================================
FILE: test/options.test.ts
================================================
import { expect, test, vi } from 'vitest'

// Tinypool is imported lazily (after vi.resetModules) so that the mocked
// node:os / node:child_process modules below are what it sees.
let Tinypool: typeof import('tinypool').default
// Mocked CPU count; vi.hoisted makes the value usable inside the hoisted
// vi.mock factories at the bottom of the file.
const cpuCount = vi.hoisted(() => 100)

beforeAll(async () => {
  vi.resetModules()
  Tinypool = (await import('tinypool')).default
})

// Thread limits below 1 are interpreted as fractions of the CPU count.
test('fractional thread limits can be set', async () => {
  const min = 0.5
  const max = 0.75
  const p = new Tinypool({
    minThreads: min,
    maxThreads: max,
  })

  expect(p.options.minThreads).toBe(cpuCount * min)
  expect(p.options.maxThreads).toBe(cpuCount * max)
})

// Tiny fractions must still yield at least one thread, never zero.
test('fractional thread limits result is 1 for very low fractions', async () => {
  const min = 0.00005
  const max = 0.00006
  const p = new Tinypool({
    minThreads: min,
    maxThreads: max,
  })

  expect(p.options.minThreads).toBe(1)
  expect(p.options.maxThreads).toBe(1)
})

test('fractional thread limits in the wrong order throw an error', async () => {
  expect(() => {
    new Tinypool({
      minThreads: 0.75,
      maxThreads: 0.25,
    })
  }).toThrow()
  // Mixing a fraction (0.75 * 100 = 75 threads) with an absolute max of 1
  // also resolves to min > max and must throw.
  expect(() => {
    new Tinypool({
      minThreads: 0.75,
      maxThreads: 1,
    })
  }).toThrow()
})

// NOTE: vi.mock calls are hoisted above the imports, so these factories run
// before tinypool is loaded in beforeAll.
vi.mock(import('node:os'), async (importOriginal) => {
  const original = await importOriginal()
  return {
    ...original,
    availableParallelism: () => cpuCount,
  }
})

vi.mock(import('node:child_process'), async (importOriginal) => {
  const original = await importOriginal()
  return {
    ...original,
    default: { ...original.default, execSync: () => cpuCount as any },
  }
})


================================================
FILE: test/pool-destroy.test.ts
================================================
import { createHook } from 'node:async_hooks'
import { dirname, resolve } from 'node:path'
import { Tinypool } from 'tinypool'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))

test('can destroy pool while tasks are running', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })
  // Schedule destroy() to run right after the infinite task has been posted.
  setImmediate(() => void pool.destroy())
  await expect(pool.run('while(1){}')).rejects.toThrow(
    /Terminating worker thread/
  )
})

test('destroy after initializing should work (#43)', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/sleep.js'),
    isolateWorkers: true,
  })

  // Register the rejection expectation before destroy() is scheduled.
  const promise = expect(pool.run({})).rejects.toThrow(
    /Terminating worker thread/
  )

  setImmediate(() => void pool.destroy())
  await promise
})

test('cleans up async resources', async () => {
  // Resolved from the async_hooks destroy callback below.
  let onCleanup = () => {}
  const waitForCleanup = new Promise<void>((r) => (onCleanup = r))
  // Fail fast if the destroy hook never fires; unref'd so it cannot keep
  // the process alive.
  const timeout = setTimeout(() => {
    throw new Error('Timeout waiting for async resource destroying')
  }, 2_000).unref()

  // asyncIds of live 'Tinypool' async resources.
  const ids = new Set<number>()

  const hook = createHook({
    init(asyncId, type) {
      if (type === 'Tinypool') {
        ids.add(asyncId)
      }
    },
    destroy(asyncId) {
      if (ids.has(asyncId)) {
        ids.delete(asyncId)
        onCleanup()
        clearTimeout(timeout)
      }
    },
  })
  hook.enable()

  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
    maxThreads: 1,
    minThreads: 1,
  })

  await pool.run('42')

  // Exactly one Tinypool async resource should exist while the pool lives.
  expect(ids.size).toBe(1)

  await pool.destroy()
  await waitForCleanup

  expect(ids.size).toBe(0)
  hook.disable()
})


================================================
FILE: test/resource-limits.test.ts
================================================
import { dirname, resolve } from 'node:path'
import { Tinypool } from 'tinypool'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))

test('resourceLimits causes task to reject', async () => {
  // Tight V8 heap limits so the runaway-allocation fixture OOMs quickly.
  const worker = new Tinypool({
    filename: resolve(__dirname, 'fixtures/resource-limits.js'),
    resourceLimits: {
      maxOldGenerationSizeMb: 4,
      maxYoungGenerationSizeMb: 2,
      codeRangeSizeMb: 4,
    },
  })
  worker.on('error', () => {
    // Ignore any additional errors that may occur.
    // This may happen because when the Worker is
    // killed a new worker is created that may hit
    // the memory limits immediately. When that
    // happens, there is no associated Promise to
    // reject so we emit an error event instead.
    // We don't care so much about that here. We
    // could potentially avoid the issue by setting
    // higher limits above but rather than try to
    // guess at limits that may work consistently,
    // let's just ignore the additional error for
    // now.
  })
  // The limits should be exposed unchanged on the pool's options.
  const limits: any = worker.options.resourceLimits
  expect(limits.maxOldGenerationSizeMb).toBe(4)
  expect(limits.maxYoungGenerationSizeMb).toBe(2)
  expect(limits.codeRangeSizeMb).toBe(4)
  await expect(worker.run(null)).rejects.toThrow(
    /Worker terminated due to reaching memory limit: JS heap out of memory/
  )
})

// Memory-based recycling should behave the same on both runtimes.
describe.each(['worker_threads', 'child_process'] as const)('%s', (runtime) => {
  test('worker is recycled after reaching maxMemoryLimitBeforeRecycle', async () => {
    const pool = new Tinypool({
      filename: resolve(__dirname, 'fixtures/leak-memory.js'),
      maxMemoryLimitBeforeRecycle: 10_000_000,
      isolateWorkers: false,
      minThreads: 1,
      maxThreads: 1,
      runtime,
    })

    const originalWorkerId = pool.threads[0]?.threadId
    expect(originalWorkerId).toBeGreaterThan(0)

    let finalThreadId = originalWorkerId
    let rounds = 0

    // This is just an estimate of how to leak "some" memory - it's not accurate.
    // Running 100 loops should be enough to make the worker reach memory limit and be recycled.
    // Use the `rounds` to make sure we don't reach the limit on the first round.
    for (const _ of Array(100).fill(0)) {
      await pool.run(10_000)

      if (pool.threads[0]) {
        finalThreadId = pool.threads[0].threadId
      }

      if (finalThreadId !== originalWorkerId) {
        break
      }

      rounds++
    }

    // Test setup should not reach max memory on first round
    expect(rounds).toBeGreaterThan(1)

    // Thread should have been recycled
    expect(finalThreadId).not.toBe(originalWorkerId)
  })

  test('recycled workers should not crash pool (regression)', async () => {
    // An absurdly low limit forces a recycle after every single task.
    const pool = new Tinypool({
      filename: resolve(__dirname, 'fixtures/leak-memory.js'),
      maxMemoryLimitBeforeRecycle: 10,
      isolateWorkers: false,
      minThreads: 2,
      maxThreads: 2,
      runtime,
    })

    // This should not crash the pool
    await Promise.all(
      Array(10)
        .fill(0)
        .map(() => pool.run(10_000))
    )
  })
})


================================================
FILE: test/runtime.test.ts
================================================
import EventEmitter from 'node:events'
import * as path from 'node:path'
import { fileURLToPath } from 'node:url'
import { Tinypool } from 'tinypool'

const __dirname = path.dirname(fileURLToPath(import.meta.url))

describe('worker_threads', () => {
  test('runs code in worker_threads', async () => {
    const pool = createPool({ runtime: 'worker_threads' })

    // A worker thread shares the process (same pid) but is not the main
    // thread.
    const result = await pool.run(`
      (async () => {
        const workerThreads = await import("worker_threads");

        return {
          sum: 11 + 12,
          isMainThread: workerThreads.isMainThread,
          pid: process.pid,
        }
      })()
    `)
    expect(result.sum).toBe(23)
    expect(result.isMainThread).toBe(false)
    expect(result.pid).toBe(process.pid)
  })

  test('sets tinypool state', async () => {
    const pool = createPool({ runtime: 'worker_threads' })

    // Tinypool publishes its runtime flags on process.__tinypool_state__.
    const result = await pool.run('process.__tinypool_state__')
    expect(result.isTinypoolWorker).toBe(true)
    expect(result.isWorkerThread).toBe(true)
    expect(result.isChildProcess).toBe(undefined)
  })

  test("worker's threadId is used as threadId", async () => {
    const pool = createPool({ runtime: 'worker_threads' })
    const threadId = pool.threads[0]!.threadId

    const result = await pool.run(`
      (async () => {
        const workerThreads = await import("worker_threads");
        return workerThreads.threadId;
      })()
    `)
    expect(result).toBe(threadId)
  })

  test('channel is closed when isolated', async () => {
    const pool = createPool({
      runtime: 'worker_threads',
      isolateWorkers: true,
      minThreads: 2,
      maxThreads: 2,
    })

    const events: string[] = []

    // With isolation, each task's channel is closed as the task finishes,
    // so onClose fires immediately after each run.
    await pool.run('', { channel: { onClose: () => events.push('call #1') } })
    expect(events).toStrictEqual(['call #1'])

    await pool.run('', { channel: { onClose: () => events.push('call #2') } })
    expect(events).toStrictEqual(['call #1', 'call #2'])

    await pool.run('', { channel: { onClose: () => events.push('call #3') } })
    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])

    await pool.destroy()
    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])
  })

  test('channel is closed when non-isolated', async () => {
    const pool = createPool({
      runtime: 'worker_threads',
      isolateWorkers: false,
      minThreads: 2,
      maxThreads: 2,
    })

    const events: string[] = []

    // Without isolation the previous task's channel is only closed when the
    // next task (or destroy) replaces it, so onClose lags one run behind.
    await pool.run('', { channel: { onClose: () => events.push('call #1') } })
    expect(events).toStrictEqual([])

    await pool.run('', { channel: { onClose: () => events.push('call #2') } })
    expect(events).toStrictEqual(['call #1'])

    await pool.run('', { channel: { onClose: () => events.push('call #3') } })
    expect(events).toStrictEqual(['call #1', 'call #2'])

    await pool.destroy()
    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])
  })
})

describe('child_process', () => {
  test('runs code in child_process', async () => {
    const pool = createPool({ runtime: 'child_process' })

    // Unlike a worker thread, a child process is its own main thread and has
    // its own pid.
    const result = await pool.run(`
    (async () => {
      const workerThreads = await import("worker_threads");

      return {
        sum: 11 + 12,
        isMainThread: workerThreads.isMainThread,
        pid: process.pid,
      }
    })()
  `)
    expect(result.sum).toBe(23)
    expect(result.isMainThread).toBe(true)
    expect(result.pid).not.toBe(process.pid)
  })

  test('sets tinypool state', async () => {
    const pool = createPool({ runtime: 'child_process' })

    // Tinypool publishes its runtime flags on process.__tinypool_state__.
    const result = await pool.run('process.__tinypool_state__')
    expect(result.isTinypoolWorker).toBe(true)
    expect(result.isChildProcess).toBe(true)
    expect(result.isWorkerThread).toBe(undefined)
  })

  test("sub-process's process ID is used as threadId", async () => {
    const pool = createPool({ runtime: 'child_process' })
    const threadId = pool.threads[0]!.threadId

    const result = await pool.run('process.pid')
    expect(result).toBe(threadId)
  })

  test('child process workerId should be internal tinypool workerId', async () => {
    const pool = createPool({ runtime: 'child_process' })
    const workerId = await pool.run('process.__tinypool_state__.workerId')
    expect(workerId).toBe(1)
  })

  test('errors are serialized', async () => {
    // Errors thrown in the sub-process must round-trip with name, message
    // and stack intact.
    const pool = createPool({ runtime: 'child_process' })

    const error = await pool
      .run("throw new TypeError('Test message');")
      .catch((e) => e)

    expect(error.name).toBe('TypeError')
    expect(error.message).toBe('Test message')
    expect(error.stack).toMatch('fixtures/eval.js')
  })

  test('can send messages to port', async () => {
    const pool = createPool({
      runtime: 'child_process',
      filename: path.resolve(
        __dirname,
        'fixtures/child_process-communication.mjs'
      ),
    })

    // Local EventEmitter acts as the user-provided channel transport.
    const emitter = new EventEmitter()

    const startup = new Promise<void>((resolve) =>
      emitter.on(
        'response',
        (message) => message === 'Child process started' && resolve()
      )
    )

    const runPromise = pool.run('default', {
      channel: {
        onMessage: (callback) => emitter.on('message', callback),
        postMessage: (message) => emitter.emit('response', message),
      },
    })

    // Wait for the child process to start
    await startup

    const response = new Promise<any>((resolve) =>
      emitter.on(
        'response',
        (message) => message !== 'Hello from main' && resolve(message)
      )
    )

    // Send message to child process
    emitter.emit('message', 'Hello from main')

    // Wait for task to finish
    await runPromise

    // Wait for response from child
    const result = await response

    expect(result).toMatchObject({
      received: 'Hello from main',
      response: 'Hello from worker',
    })
  })

  test('can send complex messages to port', async () => {
    // 'advanced' serialization (structured clone) is required for values
    // like BigInt, Map, Set, Error and RegExp to survive the IPC boundary.
    const pool = createPool({
      runtime: 'child_process',
      filename: path.resolve(
        __dirname,
        'fixtures/child_process-communication.mjs'
      ),
      serialization: 'advanced',
    })

    const complexData = {
      bigint: 123456789123456789n,
      map: new Map([['hello', 'world']]),
      set: new Set(['hello', 'world']),
      error: new Error('message'),
      regexp: /regexp/,
    }

    const emitter = new EventEmitter()

    const startup = new Promise<void>((resolve) =>
      emitter.on(
        'response',
        (message) => message === 'Child process started' && resolve()
      )
    )

    const runPromise = pool.run(complexData, {
      channel: {
        onMessage: (callback) => emitter.on('message', callback),
        postMessage: (message) => emitter.emit('response', message),
      },
    })

    // Wait for the child process to start
    await startup

    const response = new Promise<any>((resolve) =>
      emitter.on('response', (message) => resolve(message))
    )

    // Send message to child process
    emitter.emit('message', complexData)

    // Wait for task to finish
    const runResult = await runPromise

    expect(runResult).toMatchObject({
      received: complexData,
      response: 'Hello from worker',
    })

    // Wait for response from child
    const channelResult = await response

    expect(channelResult).toMatchObject({
      received: complexData,
      response: 'Hello from worker',
    })
  })

  test('channel is closed when isolated', async () => {
    const pool = createPool({
      runtime: 'child_process',
      isolateWorkers: true,
      minThreads: 2,
      maxThreads: 2,
    })

    const events: string[] = []

    // With isolation, each task's channel closes as soon as the task ends.
    await pool.run('', { channel: { onClose: () => events.push('call #1') } })
    expect(events).toStrictEqual(['call #1'])

    await pool.run('', { channel: { onClose: () => events.push('call #2') } })
    expect(events).toStrictEqual(['call #1', 'call #2'])

    await pool.run('', { channel: { onClose: () => events.push('call #3') } })
    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])

    await pool.destroy()
    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])
  })

  test('channel is closed when non-isolated', async () => {
    const pool = createPool({
      runtime: 'child_process',
      isolateWorkers: false,
      minThreads: 2,
      maxThreads: 2,
    })

    const events: string[] = []

    // Without isolation the channel closes one run late (when replaced or
    // on destroy).
    await pool.run('', { channel: { onClose: () => events.push('call #1') } })
    expect(events).toStrictEqual([])

    await pool.run('', { channel: { onClose: () => events.push('call #2') } })
    expect(events).toStrictEqual(['call #1'])

    await pool.run('', { channel: { onClose: () => events.push('call #3') } })
    expect(events).toStrictEqual(['call #1', 'call #2'])

    await pool.destroy()
    expect(events).toStrictEqual(['call #1', 'call #2', 'call #3'])
  })
})

// recycleWorkers accepts option overrides, so the pool can swap runtime
// back and forth without being recreated.
test('runtime can be changed after recycle', async () => {
  const pool = createPool({ runtime: 'worker_threads' })
  const getState = 'process.__tinypool_state__'

  await expect(
    Promise.all([pool.run(getState), pool.run(getState)])
  ).resolves.toMatchObject([{ isWorkerThread: true }, { isWorkerThread: true }])

  await pool.recycleWorkers({ runtime: 'child_process' })

  await expect(
    Promise.all([pool.run(getState), pool.run(getState)])
  ).resolves.toMatchObject([{ isChildProcess: true }, { isChildProcess: true }])

  await pool.recycleWorkers({ runtime: 'worker_threads' })

  expect(await pool.run(getState)).toMatchObject({
    isWorkerThread: true,
  })
})

// Even workers that sat idle (never ran a task) must pick up the new
// runtime when the pool is recycled with isolateWorkers enabled.
test('isolated idle workers change runtime after recycle', async () => {
  const pool = createPool({
    runtime: 'worker_threads',
    minThreads: 2,
    maxThreads: 2,
    isolateWorkers: true,
  })
  const getState = 'process.__tinypool_state__'

  await expect(pool.run(getState)).resolves.toMatchObject({
    isWorkerThread: true,
  })

  await pool.recycleWorkers({ runtime: 'child_process' })

  await expect(
    Promise.all([pool.run(getState), pool.run(getState)])
  ).resolves.toMatchObject([{ isChildProcess: true }, { isChildProcess: true }])
})

// Builds a single-worker pool around the eval fixture; callers may override
// any option (e.g. runtime, thread counts, isolation).
function createPool(options: Partial<Tinypool['options']>) {
  return new Tinypool({
    filename: path.resolve(__dirname, 'fixtures/eval.js'),
    minThreads: 1,
    maxThreads: 1,
    ...options,
  })
}


================================================
FILE: test/simple.test.ts
================================================
import EventEmitter from 'node:events'
import { cpus } from 'node:os'
import { dirname, resolve } from 'node:path'
import Tinypool from 'tinypool'
import { fileURLToPath, pathToFileURL } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))
// Promise-based delay helper used by the timing-sensitive tests below.
const sleep = (num: number) =>
  new Promise((res) => setTimeout(res, num))

test('basic test', async () => {
  const worker = new Tinypool({
    filename: resolve(__dirname, 'fixtures/simple-isworkerthread.js'),
  })
  const result = await worker.run(null)
  expect(result).toBe('done')
})

test('isWorkerThread correct value', async () => {
  expect(Tinypool.isWorkerThread).toBe(false)
})

test('Tinypool instance is an EventEmitter', async () => {
  const piscina = new Tinypool()
  expect(piscina instanceof EventEmitter).toBe(true)
})

test('Tinypool constructor options are correctly set', async () => {
  const piscina = new Tinypool({
    minThreads: 10,
    maxThreads: 20,
    maxQueue: 30,
  })

  expect(piscina.options.minThreads).toBe(10)
  expect(piscina.options.maxThreads).toBe(20)
  expect(piscina.options.maxQueue).toBe(30)
})
test('trivial eval() handler works', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })
  expect(await pool.run('42')).toBe(42)
})

test('async eval() handler works', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })
  expect(await pool.run('Promise.resolve(42)')).toBe(42)
})

test('filename can be provided while posting', async () => {
  // No filename at construction time; it is supplied per task instead.
  const pool = new Tinypool()
  const result = await pool.run('Promise.resolve(42)', {
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })
  expect(result).toBe(42)
})

test('filename can be null when initially provided', async () => {
  const pool = new Tinypool({ filename: null })
  const result = await pool.run('Promise.resolve(42)', {
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })
  expect(result).toBe(42)
})

test('filename must be provided while posting', async () => {
  // With no filename anywhere, run() must reject.
  const pool = new Tinypool()
  await expect(pool.run('doesn’t matter')).rejects.toThrow(
    /filename must be provided to run\(\) or in options object/
  )
})

test('passing env to workers works', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
    env: { A: 'foo' },
  })

  // The worker sees only the env passed in, not the host's process.env.
  expect(await pool.run('({...process.env})')).toEqual({ A: 'foo' })
})

test('passing argv to workers works', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
    argv: ['a', 'b', 'c'],
  })

  expect(await pool.run('process.argv.slice(2)')).toEqual(['a', 'b', 'c'])
})

test('passing argv to child process', async () => {
  // Same argv behavior, but with the child_process runtime.
  const pool = new Tinypool({
    runtime: 'child_process',
    filename: resolve(__dirname, 'fixtures/eval.js'),
    argv: ['a', 'b', 'c'],
  })

  expect(await pool.run('process.argv.slice(2)')).toEqual(['a', 'b', 'c'])
})

test('passing execArgv to workers works', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
    execArgv: ['--no-warnings'],
  })

  expect(await pool.run('process.execArgv')).toEqual(['--no-warnings'])
})

test('passing valid workerData works', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/simple-workerdata.js'),
    workerData: 'ABC',
  })
  // workerData is only visible inside workers, never on the main thread.
  expect(Tinypool.workerData).toBe(undefined)

  await pool.run(null)
})

test('filename can be a file:// URL', async () => {
  const url = pathToFileURL(resolve(__dirname, 'fixtures/eval.js')).href
  const pool = new Tinypool({ filename: url })
  expect(await pool.run('42')).toBe(42)
})

test('filename can be a file:// URL to an ESM module', async () => {
  const url = pathToFileURL(resolve(__dirname, 'fixtures/esm-export.mjs')).href
  const pool = new Tinypool({ filename: url })
  expect(await pool.run('42')).toBe(42)
})

test('named tasks work', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/multiple.js'),
  })

  expect(await pool.run({}, { name: 'a' })).toBe('a')
  expect(await pool.run({}, { name: 'b' })).toBe('b')
  // Without an explicit name, handler "a" is used by default.
  expect(await pool.run({})).toBe('a')
})

// Fix: this test previously shared the exact title of the test above, which
// makes failure reports ambiguous and breaks name-based test filtering.
test('named tasks work with a pool-level default name', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/multiple.js'),
    name: 'b',
  })

  expect(await pool.run({}, { name: 'a' })).toBe('a')
  expect(await pool.run({}, { name: 'b' })).toBe('b')
  // The pool-level `name` option changes the default handler to "b".
  expect(await pool.run({})).toBe('b')
})

test('isolateWorkers: false', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/isolated.js'),
    isolateWorkers: false,
  })

  expect(await pool.run({})).toBe(0)
  expect(await pool.run({})).toBe(1)
  expect(await pool.run({})).toBe(2)
})

test('isolateWorkers: true', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/isolated.js'),
    isolateWorkers: true,
  })

  expect(await pool.run({})).toBe(0)
  expect(await pool.run({})).toBe(0)
  expect(await pool.run({})).toBe(0)
})

test('workerId should never be more than maxThreads=1', async () => {
  const maxThreads = 1
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/workerId.js'),
    isolateWorkers: true,
    maxThreads: maxThreads,
  })
  // NOTE(review): destroy() is called before any task runs — presumably this
  // exercises id assignment for workers spawned after a destroy; confirm intent.
  await pool.destroy()
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)

  // Give recycled workers time to shut down before the next test starts.
  await sleep(300)
})

test('workerId should never be more than maxThreads', async () => {
  // Random thread count in [1, 4] to cover different pool sizes across runs.
  const maxThreads = Math.floor(Math.random() * (4 - 1 + 1) + 1)
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/workerId.js'),
    isolateWorkers: true,
    maxThreads: maxThreads,
  })
  // NOTE(review): same destroy-before-run pattern as above — confirm intent.
  await pool.destroy()
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)
  await expect(pool.run({})).resolves.toBeLessThanOrEqual(maxThreads)

  // Give recycled workers time to shut down before the next test starts.
  await sleep(300)
})

test('worker count should never be below minThreads when using isolateWorkers', async () => {
  const minThreads = 4
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/workerId.js'),
    isolateWorkers: true,
    minThreads,
  })

  // Even though each task recycles its worker, the pool must keep
  // `minThreads` workers alive at all times.
  for (let i = 0; i < 6; i++) {
    await pool.run({})
    expect(pool.threads.length).toBe(minThreads)
  }

  await sleep(300)
})

test('workerId should never be duplicated', async () => {
  // Oversubscribe relative to the CPU count to stress id assignment.
  const maxThreads = cpus().length + 4
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/workerId.js'),
    isolateWorkers: true,
    // challenge tinypool
    maxThreads,
  })
  let duplicated = false
  const workerIds: number[] = []

  // Records a worker id and flags it if already seen in the current batch.
  function addWorkerId(workerId: number) {
    if (workerIds.includes(workerId)) {
      duplicated = true
    }
    workerIds.push(workerId)
  }

  // Runs one task and records the id of the worker that executed it.
  const createWorkerId = async (): Promise<number> => {
    const result = await pool.run({})
    addWorkerId(result)
    return result
  }

  for (let i = 0; i < 20; i++) {
    // NOTE(review): if a duplicate is found, the expect below throws and ends
    // the loop, so this guard appears unreachable — kept as-is; confirm.
    if (duplicated) {
      continue
    }
    await Promise.all(
      new Array(maxThreads - 2).fill(0).map(() => createWorkerId())
    )
    // Reset the seen-ids set between batches.
    workerIds.length = 0

    expect(duplicated).toBe(false)
  }

  await pool.destroy()
  // Allow worker teardown to finish before the suite moves on.
  await sleep(3000)
}, 30000)

test('isolateWorkers: true with minThreads of 0 should not halt (#42)', async () => {
  const minThreads = 0
  const maxThreads = 6
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/isolated.js'),
    minThreads,
    maxThreads,
    isolateWorkers: true,
  })
  // https://github.com/tinylibs/tinypool/pull/44#discussion_r1070169279
  // One more task than workers, so at least one task must wait for a
  // recycled worker — regression test for the halt described in #42.
  const promises = Array.from({ length: maxThreads + 1 }, () => pool.run({}))
  await Promise.all(promises)
})


================================================
FILE: test/task-queue.test.ts
================================================
import { dirname, resolve } from 'node:path'
import { Tinypool, type Task, type TaskQueue } from 'tinypool'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))

test('will put items into a task queue until they can run', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/wait-for-notify.js'),
    minThreads: 2,
    maxThreads: 3,
  })
  expect(pool.threads.length).toBe(2)
  expect(pool.queueSize).toBe(0)

  const makeBuffer = () => new Int32Array(new SharedArrayBuffer(4))
  const buffers = [makeBuffer(), makeBuffer(), makeBuffer(), makeBuffer()]

  // After submitting the i-th blocking task the pool should have grown to
  // `threads` workers and hold `queued` pending tasks.
  const expectedStates = [
    { threads: 2, queued: 0 },
    { threads: 2, queued: 0 },
    { threads: 3, queued: 0 },
    { threads: 3, queued: 1 },
  ]

  const results: Promise<unknown>[] = []
  for (const [i, buffer] of buffers.entries()) {
    results.push(pool.run(buffer))
    expect(pool.threads.length).toBe(expectedStates[i]!.threads)
    expect(pool.queueSize).toBe(expectedStates[i]!.queued)
  }

  // Unblock every task; each one is waiting on its own buffer.
  for (const buffer of buffers) {
    Atomics.store(buffer, 0, 1)
    Atomics.notify(buffer, 0, 1)
  }

  await results[0]
  expect(pool.queueSize).toBe(0)

  await Promise.all(results)
})

test('will reject items over task queue limit', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
    minThreads: 0,
    maxThreads: 1,
    maxQueue: 2,
  })
  const promises: Promise<void>[] = []

  expect(pool.threads.length).toBe(0)
  expect(pool.queueSize).toBe(0)

  // Submits one never-ending task and asserts the resulting pool state.
  const submit = (pattern: RegExp, threads: number, queued: number) => {
    promises.push(expect(pool.run('while (true) {}')).rejects.toThrow(pattern))
    expect(pool.threads.length).toBe(threads)
    expect(pool.queueSize).toBe(queued)
  }

  // The first task occupies the single worker; the next two fill the queue.
  submit(/Terminating worker thread/, 1, 0)
  submit(/Terminating worker thread/, 1, 1)
  submit(/Terminating worker thread/, 1, 2)

  // The queue is at its limit of 2, so this submission is rejected outright.
  promises.push(
    expect(pool.run('while (true) {}')).rejects.toThrow(
      /Task queue is at limit/
    )
  )

  await pool.destroy()
  await Promise.all(promises)
})

test('will reject items when task queue is unavailable', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
    minThreads: 0,
    maxThreads: 1,
    maxQueue: 0,
  })
  const promises: Promise<void>[] = []

  expect(pool.threads.length).toBe(0)
  expect(pool.queueSize).toBe(0)

  // The first task spawns the single allowed worker and blocks it forever.
  const first = expect(pool.run('while (true) {}')).rejects.toThrow(
    /Terminating worker thread/
  )
  promises.push(first)
  expect(pool.threads.length).toBe(1)
  expect(pool.queueSize).toBe(0)

  // With maxQueue=0 and the worker busy, further tasks are rejected.
  const second = expect(pool.run('while (true) {}')).rejects.toThrow(
    /No task queue available and all Workers are busy/
  )
  promises.push(second)

  await pool.destroy()
  await Promise.all(promises)
})

test('will reject items when task queue is unavailable (fixed thread count)', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
    minThreads: 1,
    maxThreads: 1,
    maxQueue: 0,
  })
  const promises: Promise<void>[] = []

  // The single worker is pre-spawned because minThreads is 1.
  expect(pool.threads.length).toBe(1)
  expect(pool.queueSize).toBe(0)

  const first = expect(pool.run('while (true) {}')).rejects.toThrow(
    /Terminating worker thread/
  )
  promises.push(first)
  expect(pool.threads.length).toBe(1)
  expect(pool.queueSize).toBe(0)

  // No queue and the only worker is busy, so this task cannot be accepted.
  const second = expect(pool.run('while (true) {}')).rejects.toThrow(
    /No task queue available and all Workers are busy/
  )
  promises.push(second)

  await pool.destroy()
  await Promise.all(promises)
})

test('tasks can share a Worker if requested (both tests blocking)', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/wait-for-notify.js'),
    minThreads: 0,
    maxThreads: 1,
    maxQueue: 0,
    concurrentTasksPerWorker: 2,
  })
  const promises: Promise<void>[] = []

  expect(pool.threads.length).toBe(0)
  expect(pool.queueSize).toBe(0)

  // Both tasks block forever on their buffers; thanks to
  // concurrentTasksPerWorker=2 they land on the same single worker, and both
  // are rejected when the pool is destroyed below.
  for (let i = 0; i < 2; i++) {
    promises.push(
      expect(
        pool.run(new Int32Array(new SharedArrayBuffer(4)))
      ).rejects.toBeTruthy()
    )
    expect(pool.threads.length).toBe(1)
    expect(pool.queueSize).toBe(0)
  }

  await pool.destroy()
  await Promise.all(promises)
})

test('tasks can share a Worker if requested (both tests finish)', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/wait-for-notify.js'),
    minThreads: 1,
    maxThreads: 1,
    maxQueue: 0,
    concurrentTasksPerWorker: 2,
  })

  const buffers = [
    new Int32Array(new SharedArrayBuffer(4)),
    new Int32Array(new SharedArrayBuffer(4)),
  ] as const

  expect(pool.threads.length).toBe(1)
  expect(pool.queueSize).toBe(0)

  // Both tasks should run concurrently on the single worker without queueing.
  const firstTask = pool.run(buffers[0])
  expect(pool.threads.length).toBe(1)
  expect(pool.queueSize).toBe(0)

  const secondTask = pool.run(buffers[1])
  expect(pool.threads.length).toBe(1)
  expect(pool.queueSize).toBe(0)

  // Wake both tasks, then block the main thread until each buffer no longer
  // holds 1 — presumably the fixture writes -1 and notifies when it finishes
  // (the assertions below check for -1); TODO confirm against
  // wait-for-notify.js.
  Atomics.store(buffers[0] as any, 0, 1)
  Atomics.store(buffers[1] as any, 0, 1)
  Atomics.notify(buffers[0] as any, 0, 1)
  Atomics.notify(buffers[1] as any, 0, 1)
  Atomics.wait(buffers[0] as any, 0, 1)
  Atomics.wait(buffers[1] as any, 0, 1)

  await firstTask
  expect(buffers[0][0]).toBe(-1)
  await secondTask
  expect(buffers[1][0]).toBe(-1)

  // The pool never needed a second worker or the queue.
  expect(pool.threads.length).toBe(1)
  expect(pool.queueSize).toBe(0)
})

test('custom task queue works', async () => {
  // Flags proving the pool actually delegates to the custom queue.
  let sizeCalled: boolean = false
  let shiftCalled: boolean = false
  let pushCalled: boolean = false

  // Minimal TaskQueue implementation backed by a plain array.
  class CustomTaskPool implements TaskQueue {
    tasks: Task[] = []

    get size(): number {
      sizeCalled = true
      return this.tasks.length
    }

    shift(): Task | null {
      shiftCalled = true
      return this.tasks.length > 0 ? (this.tasks.shift() as Task) : null
    }

    push(task: Task): void {
      pushCalled = true
      this.tasks.push(task)

      // Every queued task carries queueOptionsSymbol; its value is null when
      // the caller attached no queue options (the `a: 3` task below).
      expect(Tinypool.queueOptionsSymbol in task).toBeTruthy()
      if ((task as any).task.a === 3) {
        // @ts-expect-error -- intentional
        expect(task[Tinypool.queueOptionsSymbol]).toBeNull()
      } else {
        // @ts-expect-error -- intentional
        expect(task[Tinypool.queueOptionsSymbol].option).toEqual(
          (task as any).task.a
        )
      }
    }

    remove(task: Task): void {
      const index = this.tasks.indexOf(task)
      this.tasks.splice(index, 1)
    }

    cancel() {}
  }

  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
    taskQueue: new CustomTaskPool(),
    // Setting maxThreads low enough to ensure we queue
    maxThreads: 1,
    minThreads: 1,
  })

  // Attaches per-task queue options under Tinypool.queueOptionsSymbol.
  function makeTask(task: any, option: any) {
    return { ...task, [Tinypool.queueOptionsSymbol]: { option } }
  }

  const ret = await Promise.all([
    pool.run(makeTask({ a: 1 }, 1)),
    pool.run(makeTask({ a: 2 }, 2)),
    pool.run({ a: 3 }), // No queueOptionsSymbol attached
  ])

  // Tasks round-trip through the custom queue unchanged.
  expect(ret[0].a).toBe(1)
  expect(ret[1].a).toBe(2)
  expect(ret[2].a).toBe(3)

  expect(sizeCalled).toBeTruthy()
  expect(pushCalled).toBeTruthy()
  expect(shiftCalled).toBeTruthy()
})

test('queued tasks can be cancelled', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/sleep.js'),
    minThreads: 0,
    maxThreads: 1,
  })

  const time = 2000
  const taskCount = 10

  const promises: Promise<void>[] = []
  let finishedTasks = 0
  let cancelledTasks = 0

  for (let i = 0; i < taskCount; i++) {
    promises.push(
      pool
        .run({ time })
        .then(() => {
          finishedTasks++
        })
        .catch((error) => {
          // Count cancellations; anything else is a real failure.
          if (error.message !== 'The task has been cancelled') {
            throw error
          }
          cancelledTasks++
        })
    )
  }

  // Give the first task time to start; the rest stay queued.
  await new Promise((resolve) => setTimeout(resolve, time / 2))
  expect(pool.queueSize).toBe(taskCount - 1)

  // One task is running; cancel every pending one.
  pool.cancelPendingTasks()

  // The running task is unaffected; the queued ones begin cancelling.
  expect(finishedTasks).toBe(0)
  expect(pool.queueSize).toBe(0)

  await Promise.all(promises)

  expect({ finishedTasks, cancelledTasks }).toEqual({
    finishedTasks: 1,
    cancelledTasks: taskCount - 1,
  })
})


================================================
FILE: test/teardown.test.ts
================================================
import { dirname, resolve } from 'node:path'
import { Tinypool } from 'tinypool'
import { fileURLToPath } from 'node:url'
import { MessageChannel } from 'node:worker_threads'

const __dirname = dirname(fileURLToPath(import.meta.url))

test('isolated workers call teardown on worker recycle', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/teardown.mjs'),
    minThreads: 1,
    maxThreads: 1,
    isolateWorkers: true,
    teardown: 'namedTeardown',
  })

  // Each iteration gets a fresh worker, so the per-worker task number is
  // always 1 and teardown fires after every single task.
  for (let iteration = 0; iteration < 5; iteration++) {
    const { port1, port2 } = new MessageChannel()
    const teardownMessage = new Promise((resolve) =>
      port2.on('message', resolve)
    )

    const output = await pool.run({ port: port1 }, { transferList: [port1] })
    expect(output).toBe('Output of task #1')

    await expect(teardownMessage).resolves.toBe('Teardown of task #1')
  }
})

test('non-isolated workers call teardown on worker recycle', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/teardown.mjs'),
    minThreads: 1,
    maxThreads: 1,
    isolateWorkers: false,
    teardown: 'namedTeardown',
  })

  // Fails the test if teardown runs while the worker is still being reused.
  function unexpectedTeardown(message: string) {
    assert.fail(
      `Teardown should not have been called yet. Received "${message}"`
    )
  }

  const { port1, port2 } = new MessageChannel()

  for (const index of [1, 2, 3, 4, 5]) {
    // NOTE(review): this registers a new listener on every iteration but
    // off() below removes only one occurrence — confirm the duplicate
    // registrations are intentional.
    port2.on('message', unexpectedTeardown)

    // The port is transferred only with the first task; later tasks reuse
    // the same worker (which already holds the port).
    const transferList = index === 1 ? [port1] : []

    const output = await pool.run({ port: transferList[0] }, { transferList })
    expect(output).toBe(`Output of task #${index}`)
  }

  // Teardown is expected only once the pool shuts the worker down.
  port2.off('message', unexpectedTeardown)
  const promise = new Promise((resolve) => port2.on('message', resolve))

  await pool.destroy()
  await expect(promise).resolves.toMatchInlineSnapshot(`"Teardown of task #5"`)
})


================================================
FILE: test/termination.test.ts
================================================
import { dirname, resolve } from 'node:path'
import { Tinypool } from 'tinypool'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))
// Per-test cleanup callbacks registered by individual tests below.
const cleanups: (() => Promise<unknown>)[] = []

afterEach(async () => {
  // Run and clear every registered cleanup; splice(0) empties the list.
  const pending = cleanups.splice(0)
  await Promise.all(pending.map((cleanup) => cleanup()))
})

test('termination timeout throws when worker does not terminate in time', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/sleep.js'),
    terminateTimeout: 10,
    minThreads: 1,
    maxThreads: 2,
    isolateWorkers: true,
  })

  expect(pool.threads.length).toBe(1)

  const worker = pool.threads[0]
  expect(worker).toBeTruthy()

  // Register the real terminate() for restoration in afterEach, then stub it
  // with a promise that never settles so terminateTimeout must fire.
  cleanups.push(worker!.terminate.bind(worker))
  worker!.terminate = () => new Promise(() => {})

  await expect(pool.run('default')).rejects.toThrowError(
    'Failed to terminate worker'
  )
})

test('writing to terminating worker does not crash', async () => {
  const listeners: ((msg: any) => void)[] = []

  const pool = new Tinypool({
    runtime: 'child_process',
    filename: resolve(__dirname, 'fixtures/sleep.js'),
    minThreads: 1,
    maxThreads: 1,
  })

  // Collect the channel listeners so we can message the worker later.
  await pool.run(
    {},
    {
      channel: {
        onMessage: (listener) => {
          listeners.push(listener)
        },
        postMessage: () => {},
      },
    }
  )

  // Send a message while destroy() is in flight; this must not throw.
  const destroyed = pool.destroy()
  for (const listener of listeners) {
    listener('Hello from main thread')
  }

  await destroyed
})

test('recycling workers while closing pool does not crash', async () => {
  const pool = new Tinypool({
    runtime: 'child_process',
    filename: resolve(__dirname, 'fixtures/nested-pool.mjs'),
    isolateWorkers: true,
    minThreads: 1,
    maxThreads: 1,
  })

  // Fire ten tasks at once; isolation forces a worker recycle after each.
  const tasks = Array.from({ length: 10 }, () => pool.run({}))
  await Promise.all(tasks)

  await pool.destroy()
})


================================================
FILE: test/uncaught-exception-from-handler.test.ts
================================================
import { dirname, resolve, sep } from 'node:path'
import { Tinypool } from 'tinypool'
import { fileURLToPath } from 'node:url'
import { once } from 'node:events'

const __dirname = dirname(fileURLToPath(import.meta.url))

test('uncaught exception resets Worker', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })

  // A throw inside the task surfaces as a rejection of run().
  const task = pool.run('throw new Error("not_caught")')
  await expect(task).rejects.toThrow(/not_caught/)
})

test('uncaught exception in immediate resets Worker', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })

  // The task itself never settles; the immediate throws outside of it.
  const task = pool.run(`
    setImmediate(() => { throw new Error("not_caught") });
    new Promise(() => {}) // act as if we were doing some work
  `)
  await expect(task).rejects.toThrow(/not_caught/)
})

test('uncaught exception in immediate after task yields error event', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
    maxThreads: 1,
    useAtomics: false,
  })

  const errorEvent: Promise<Error[]> = once(pool, 'error')

  // The task resolves with 42 first; the timer throws afterwards, outside of
  // any task, which should surface as an 'error' event on the pool.
  const taskResult = pool.run(`
    setTimeout(() => { throw new Error("not_caught") }, 500);
    42
  `)

  expect(await taskResult).toBe(42)

  // Hack a bit to make sure we get the 'exit'/'error' events.
  expect(pool.threads.length).toBe(1)
  pool.threads[0]!.ref?.()

  // This is the main assertion here.
  expect((await errorEvent)[0]!.message).toEqual('not_caught')
})

test('using parentPort is treated as an error', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })
  // Workers must communicate through tinypool's own protocol; a raw
  // parentPort message is rejected as unexpected.
  const task = pool.run(`
    (async () => {
      console.log();
      const parentPort = (await import('worker_threads')).parentPort;
      parentPort.postMessage("some message");
      new Promise(() => {}) /* act as if we were doing some work */
    })()
      `)
  await expect(task).rejects.toThrow(
    /Unexpected message on Worker: 'some message'/
  )
})

test('no named handler found from worker', async () => {
  const pool = new Tinypool({
    filename: resolve(__dirname, 'fixtures/eval.js'),
  })

  let errorMessage = 'Worker did not throw error'

  try {
    await pool.run('', { name: 'someHandler' })
  } catch (error) {
    errorMessage = error instanceof Error ? error.message : String(error)
  }

  // Normalize cwd and path separators so the snapshot is stable across
  // machines and operating systems.
  const normalized = errorMessage
    .replace(process.cwd(), '<process-cwd>')
    .replaceAll(sep, '/')

  expect(normalized).toMatchInlineSnapshot(
    `"No handler function "someHandler" exported from "<process-cwd>/test/fixtures/eval.js""`
  )
})


================================================
FILE: test/worker-stdio.test.ts
================================================
import * as path from 'node:path'
import { fileURLToPath } from 'node:url'
import { stripVTControlCharacters } from 'node:util'
import { Tinypool } from 'tinypool'

const runtimes = ['worker_threads', 'child_process'] as const
const __dirname = path.dirname(fileURLToPath(import.meta.url))

test.each(runtimes)(
  "worker's stdout and stderr are piped to main thread when { runtime: '%s' }",
  async (runtime) => {
    const pool = createPool({
      runtime,
      minThreads: 1,
      maxThreads: 1,
    })

    // Capture both streams before running the fixture, which writes to
    // stdout and stderr from inside the worker.
    const getStdout = captureStandardStream('stdout')
    const getStderr = captureStandardStream('stderr')

    await pool.run({})

    expect(getStdout()).toMatch('Worker message')
    expect(getStderr()).toMatch('Worker error')
  }
)

// Builds a single-worker pool around the stdio fixture; callers may override
// any pool option.
function createPool(options: Partial<Tinypool['options']>) {
  return new Tinypool({
    filename: path.resolve(__dirname, 'fixtures/stdio.mjs'),
    minThreads: 1,
    maxThreads: 1,
    ...options,
  })
}

/**
 * Replaces process.stdout/stderr's write() with a spy and returns a collector
 * that restores the original write() and yields everything captured, with
 * ANSI escape codes stripped.
 */
function captureStandardStream(type: 'stdout' | 'stderr') {
  // Fix: return `true` like a real write() does (boolean backpressure
  // signal), instead of `undefined`, so code that checks the return value
  // keeps behaving while the stream is stubbed.
  const spy = vi.fn((..._args: unknown[]) => true)

  // eslint-disable-next-line @typescript-eslint/unbound-method
  const original = process[type].write
  process[type].write = spy as unknown as typeof original

  return function collect() {
    // Restore the real stream before handing back the captured output.
    process[type].write = original
    return stripVTControlCharacters(
      spy.mock.calls.map((call) => call[0]).join('')
    )
  }
}


================================================
FILE: tsconfig.json
================================================
{
  "compilerOptions": {
    "target": "ESNext",
    "module": "ESNext",
    "strict": true,
    "moduleResolution": "Bundler",
    "lib": ["ESNext", "WebWorker"],
    "noUncheckedIndexedAccess": true,
    "baseUrl": ".",
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "noImplicitReturns": true,
    "noFallthroughCasesInSwitch": true,
    "esModuleInterop": true,
    "resolveJsonModule": true,
    "forceConsistentCasingInFileNames": true,
    "types": ["vitest/globals", "@types/node"],
    "paths": {
      "tinypool": ["./dist/index.d.ts"]
    }
  },
  "include": ["./*.d.ts", "src/**/*", "test/**/*"],
  "exclude": ["node_modules", "dist"]
}


================================================
FILE: tsdown.config.ts
================================================
import { defineConfig } from 'tsdown'

// Build the public entry point plus each file under src/entry/ as its own
// bundle, so workers can load the entry modules standalone at runtime.
export default defineConfig({
  entry: ['src/index.ts', 'src/entry/*.ts'],
})


================================================
FILE: vitest.config.ts
================================================
import { dirname, resolve } from 'node:path'
import { defineConfig } from 'vitest/config'
import { fileURLToPath } from 'node:url'

const __dirname = dirname(fileURLToPath(import.meta.url))

export default defineConfig({
  resolve: {
    alias: {
      // Tests import 'tinypool' from the built dist/ output, not from src/.
      tinypool: resolve(__dirname, './dist/index.js'),
    },
  },
  test: {
    globals: true,
    // Test files run in a shared environment rather than isolated workers.
    isolate: false,

    benchmark: {
      include: ['**/**.bench.ts'],
    },
  },
})
Download .txt
gitextract_uc9pk7z0/

├── .clean-publish
├── .github/
│   ├── FUNDING.yml
│   ├── dependabot.yml
│   └── workflows/
│       ├── benchmark.yml
│       ├── nodejs.yml
│       ├── publish.yml
│       └── release-commits.yml
├── .gitignore
├── .npmignore
├── .prettierrc
├── .taprc
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING
├── LICENSE
├── README.md
├── benchmark/
│   ├── fixtures/
│   │   ├── add-process.mjs
│   │   ├── add-worker.mjs
│   │   └── add.mjs
│   ├── isolate-benchmark.bench.ts
│   └── simple.bench.ts
├── eslint.config.js
├── global.d.ts
├── package.json
├── src/
│   ├── common.ts
│   ├── entry/
│   │   ├── process.ts
│   │   ├── utils.ts
│   │   └── worker.ts
│   ├── index.ts
│   ├── runtime/
│   │   ├── process-worker.ts
│   │   └── thread-worker.ts
│   └── utils.ts
├── test/
│   ├── async-context.test.ts
│   ├── atomic.test.ts
│   ├── fixtures/
│   │   ├── child_process-communication.mjs
│   │   ├── esm-export.mjs
│   │   ├── eval.js
│   │   ├── isolated.js
│   │   ├── leak-memory.js
│   │   ├── move.js
│   │   ├── multiple.js
│   │   ├── nested-pool.mjs
│   │   ├── notify-then-sleep-or.js
│   │   ├── resource-limits.js
│   │   ├── simple-isworkerthread.js
│   │   ├── simple-workerdata.js
│   │   ├── sleep.js
│   │   ├── stdio.mjs
│   │   ├── teardown.mjs
│   │   ├── wait-for-notify.js
│   │   ├── wait-for-others.js
│   │   └── workerId.js
│   ├── globals.test.ts
│   ├── idle-timeout.test.ts
│   ├── isolation.test.ts
│   ├── move.test.ts
│   ├── options.test.ts
│   ├── pool-destroy.test.ts
│   ├── resource-limits.test.ts
│   ├── runtime.test.ts
│   ├── simple.test.ts
│   ├── task-queue.test.ts
│   ├── teardown.test.ts
│   ├── termination.test.ts
│   ├── uncaught-exception-from-handler.test.ts
│   └── worker-stdio.test.ts
├── tsconfig.json
├── tsdown.config.ts
└── vitest.config.ts
Download .txt
SYMBOL INDEX (193 symbols across 28 files)

FILE: benchmark/isolate-benchmark.bench.ts
  constant THREADS (line 7) | const THREADS = cpus().length - 1
  constant ROUNDS (line 8) | const ROUNDS = THREADS * 10
  constant ITERATIONS (line 9) | const ITERATIONS = 100
  function execute (line 53) | async function execute() {
  function workerThreadTask (line 66) | async function workerThreadTask() {
  function childProcessTask (line 78) | async function childProcessTask() {

FILE: eslint.config.js
  function defineConfig (line 69) | function defineConfig(config) {

FILE: global.d.ts
  type Process (line 3) | interface Process {

FILE: src/common.ts
  type TinypoolChannel (line 5) | interface TinypoolChannel {
  type TinypoolWorker (line 16) | interface TinypoolWorker {
  type TinypoolWorkerMessage (line 43) | interface TinypoolWorkerMessage<
  type StartupMessage (line 50) | interface StartupMessage {
  type RequestMessage (line 58) | interface RequestMessage {
  type ReadyMessage (line 65) | interface ReadyMessage {
  type ResponseMessage (line 69) | interface ResponseMessage {
  type TinypoolPrivateData (line 76) | interface TinypoolPrivateData {
  type TinypoolData (line 80) | type TinypoolData = [TinypoolPrivateData, any] // [{ ... }, workerData]
  function isTransferable (line 90) | function isTransferable(value: any): boolean {
  function isMovable (line 101) | function isMovable(value: any): boolean {
  function markMovable (line 105) | function markMovable(value: object): void {
  type Transferable (line 114) | interface Transferable {
  type Task (line 119) | interface Task {
  type TaskQueue (line 124) | interface TaskQueue {
  function isTaskQueue (line 132) | function isTaskQueue(value: any): boolean {

FILE: src/entry/process.ts
  type IncomingMessage (line 11) | type IncomingMessage =
  type OutgoingMessage (line 15) | type OutgoingMessage =
  function onMessage (line 65) | async function onMessage(message: IncomingMessage & { source: 'port' }) {
  function serializeError (line 110) | function serializeError(error: unknown) {

FILE: src/entry/utils.ts
  function getImportESM (line 9) | function getImportESM() {
  type Handler (line 21) | type Handler = Function
  function getHandler (line 26) | async function getHandler(
  function throwInNextTick (line 72) | function throwInNextTick(error: Error) {

FILE: src/entry/worker.ts
  function atomicsWaitLoop (line 61) | function atomicsWaitLoop(port: MessagePort, sharedBuffer: Int32Array) {
  function onMessage (line 91) | function onMessage(

FILE: src/index.ts
  type Process (line 42) | interface Process {
  type AbortSignalEventTargetAddOptions (line 56) | interface AbortSignalEventTargetAddOptions {
  type AbortSignalEventTarget (line 60) | interface AbortSignalEventTarget {
  type AbortSignalEventEmitter (line 69) | interface AbortSignalEventEmitter {
  type AbortSignalAny (line 73) | type AbortSignalAny = AbortSignalEventTarget | AbortSignalEventEmitter
  function onabort (line 74) | function onabort(abortSignal: AbortSignalAny, listener: () => void) {
  class AbortError (line 81) | class AbortError extends Error {
    method constructor (line 82) | constructor() {
    method name (line 86) | get name() {
  class CancelError (line 91) | class CancelError extends Error {
    method constructor (line 92) | constructor() {
    method name (line 96) | get name() {
  type ResourceLimits (line 101) | type ResourceLimits = Worker extends {
  class ArrayTaskQueue (line 107) | class ArrayTaskQueue implements TaskQueue {
    method size (line 110) | get size() {
    method shift (line 114) | shift(): Task | null {
    method push (line 118) | push(task: Task): void {
    method remove (line 122) | remove(task: Task): void {
    method cancel (line 128) | cancel(): void {
  type Options (line 136) | interface Options {
  type FilledOptions (line 160) | interface FilledOptions extends Options {
  type RunOptions (line 187) | interface RunOptions {
  type FilledRunOptions (line 196) | interface FilledRunOptions extends RunOptions {
  class DirectlyTransferable (line 210) | class DirectlyTransferable implements Transferable {
    method constructor (line 212) | constructor(value: object) {
    method [kTransferable] (line 216) | get [kTransferable](): object {
    method [kValue] (line 220) | get [kValue](): object {
  class ArrayBufferViewTransferable (line 225) | class ArrayBufferViewTransferable implements Transferable {
    method constructor (line 227) | constructor(view: ArrayBufferView) {
    method [kTransferable] (line 231) | get [kTransferable](): object {
    method [kValue] (line 235) | get [kValue](): object {
  type TaskCallback (line 242) | type TaskCallback = (err: Error, result: any) => void
  type TransferList (line 246) | type TransferList = MessagePort extends {
  type TransferListItem (line 251) | type TransferListItem = TransferList extends (infer T)[] ? T : never
  function maybeFileURLToPath (line 253) | function maybeFileURLToPath(filename: string): string {
  class TaskInfo (line 261) | class TaskInfo extends AsyncResource implements Task {
    method constructor (line 276) | constructor(
    method releaseTask (line 315) | releaseTask(): any {
    method done (line 321) | done(err: unknown | null, result?: any): void {
    method [kQueueOptions] (line 338) | get [kQueueOptions](): object | null {
  method markAsReady (line 346) | markAsReady(): void {
  method isReady (line 355) | isReady(): boolean {
  method onReady (line 359) | onReady(fn: () => void) {
  class AsynchronouslyCreatedResourcePool (line 370) | class AsynchronouslyCreatedResourcePool<
    method constructor (line 378) | constructor(maximumUsage: number) {
    method add (line 383) | add(item: T) {
    method delete (line 395) | delete(item: T) {
    method findAvailable (line 400) | findAvailable(): T | null {
    method size (line 419) | get size() {
    method maybeAvailable (line 423) | maybeAvailable(item: T) {
    method onAvailable (line 432) | onAvailable(fn: (item: T) => void) {
  method [Symbol.iterator] (line 414) | *[Symbol.iterator]() {
  type ResponseCallback (line 437) | type ResponseCallback = (response: ResponseMessage) => void
  class WorkerInfo (line 448) | class WorkerInfo extends AsynchronouslyCreatedResource {
    method constructor (line 463) | constructor(
    method destroy (line 489) | async destroy(timeout?: number): Promise<void> {
    method clearIdleTimeout (line 542) | clearIdleTimeout(): void {
    method ref (line 549) | ref(): WorkerInfo {
    method unref (line 554) | unref(): WorkerInfo {
    method _handleResponse (line 561) | _handleResponse(message: ResponseMessage): void {
    method postTask (line 572) | postTask(taskInfo: TaskInfo) {
    method processPendingMessages (line 604) | processPendingMessages() {
    method isRunningAbortableTask (line 624) | isRunningAbortableTask(): boolean {
    method currentUsage (line 632) | currentUsage(): number {
  class ThreadPool (line 638) | class ThreadPool {
    method constructor (line 651) | constructor(publicInterface: Tinypool, options: Options) {
    method _ensureEnoughWorkersForTaskQueue (line 691) | _ensureEnoughWorkersForTaskQueue(): void {
    method _ensureMaximumWorkers (line 700) | _ensureMaximumWorkers(): void {
    method _ensureMinimumWorkers (line 706) | _ensureMinimumWorkers(): void {
    method _addNewWorker (line 712) | _addNewWorker(): void {
    method _processPendingMessages (line 856) | _processPendingMessages() {
    method _removeWorker (line 871) | _removeWorker(workerInfo: WorkerInfo): Promise<void> {
    method _onWorkerAvailable (line 879) | _onWorkerAvailable(workerInfo: WorkerInfo): void {
    method runTask (line 915) | runTask(task: any, options: RunOptions): Promise<any> {
    method shouldRecycleWorker (line 1046) | shouldRecycleWorker(taskInfo?: TaskInfo): boolean {
    method pendingCapacity (line 1070) | pendingCapacity(): number {
    method _maybeDrain (line 1076) | _maybeDrain() {
    method destroy (line 1082) | async destroy() {
    method recycleWorkers (line 1104) | async recycleWorkers(options: Pick<Options, 'runtime'> = {}) {
  class Tinypool (line 1141) | class Tinypool extends EventEmitterAsyncResource {
    method constructor (line 1144) | constructor(options: Options = {}) {
    method run (line 1182) | run(task: any, options: RunOptions = kDefaultRunOptions) {
    method destroy (line 1195) | async destroy() {
    method options (line 1200) | get options(): FilledOptions {
    method threads (line 1204) | get threads(): TinypoolWorker[] {
    method queueSize (line 1212) | get queueSize(): number {
    method cancelPendingTasks (line 1217) | cancelPendingTasks() {
    method recycleWorkers (line 1222) | async recycleWorkers(options: Pick<Options, 'runtime'> = {}) {
    method completed (line 1226) | get completed(): number {
    method duration (line 1230) | get duration(): number {
    method isWorkerThread (line 1234) | static get isWorkerThread(): boolean {
    method workerData (line 1238) | static get workerData(): any {
    method version (line 1242) | static get version(): string {
    method move (line 1249) | static move(
    method transferableSymbol (line 1270) | static get transferableSymbol() {
    method valueSymbol (line 1274) | static get valueSymbol() {
    method queueOptionsSymbol (line 1278) | static get queueOptionsSymbol() {

FILE: src/runtime/process-worker.ts
  constant SIGKILL_TIMEOUT (line 11) | const SIGKILL_TIMEOUT = 1000
  class ProcessWorker (line 13) | class ProcessWorker implements TinypoolWorker {
    method initialize (line 23) | initialize(options: Parameters<TinypoolWorker['initialize']>[0]) {
    method terminate (line 52) | async terminate() {
    method setChannel (line 71) | setChannel(channel: TinypoolChannel) {
    method send (line 85) | private send(message: Parameters<NonNullable<(typeof process)['send']>...
    method postMessage (line 91) | postMessage(message: any, transferListItem?: Readonly<TransferListItem...
    method on (line 117) | on(event: string, callback: (...args: any[]) => void) {
    method once (line 136) | once(event: string, callback: (...args: any[]) => void) {
    method emit (line 140) | emit(event: string, ...data: any[]) {
    method ref (line 144) | ref() {
    method unref (line 148) | unref() {
  function hasUnref (line 168) | function hasUnref(stream: null | object): stream is { unref: () => void } {

FILE: src/runtime/thread-worker.ts
  class ThreadWorker (line 5) | class ThreadWorker implements TinypoolWorker {
    method initialize (line 12) | initialize(options: Parameters<TinypoolWorker['initialize']>[0]) {
    method terminate (line 20) | async terminate() {
    method postMessage (line 28) | postMessage(message: any, transferListItem?: Readonly<TransferListItem...
    method on (line 32) | on(event: string, callback: (...args: any[]) => void) {
    method once (line 36) | once(event: string, callback: (...args: any[]) => void) {
    method emit (line 40) | emit(event: string, ...data: any[]) {
    method ref (line 44) | ref() {
    method unref (line 48) | unref() {
    method setChannel (line 52) | setChannel(channel: TinypoolChannel) {

FILE: src/utils.ts
  function stdout (line 1) | function stdout(): NodeJS.WriteStream | undefined {
  function stderr (line 6) | function stderr(): NodeJS.WriteStream | undefined {

FILE: test/async-context.test.ts
  method init (line 16) | init(id, type) {
  method before (line 22) | before(id) {
  method after (line 25) | after(id) {
  method promiseResolve (line 28) | promiseResolve() {

FILE: test/atomic.test.ts
  function popcount8 (line 62) | function popcount8(v: number): number {

FILE: test/fixtures/child_process-communication.mjs
  function run (line 1) | async function run(task) {

FILE: test/fixtures/leak-memory.js
  function run (line 11) | function run(bytes) {

FILE: test/fixtures/multiple.js
  function a (line 3) | function a() {
  function b (line 7) | function b() {

FILE: test/fixtures/nested-pool.mjs
  function nestedPool (line 4) | async function nestedPool() {
  function entrypoint (line 17) | function entrypoint() {}

FILE: test/fixtures/stdio.mjs
  function run (line 1) | function run() {

FILE: test/fixtures/teardown.mjs
  function task (line 8) | function task(options) {
  function namedTeardown (line 15) | async function namedTeardown() {

FILE: test/globals.test.ts
  function createPool (line 23) | function createPool(options: Partial<Tinypool['options']>) {

FILE: test/isolation.test.ts
  function getThreadIds (line 17) | function getThreadIds() {
  function getThreadIds (line 47) | function getThreadIds() {
  function times (line 77) | function times(count: number) {
  function intersection (line 83) | function intersection<T>(a: T[], b: T[]) {

FILE: test/move.test.ts
  method [transferableSymbol] (line 14) | get [transferableSymbol](): object {
  method [valueSymbol] (line 17) | get [valueSymbol](): object {
  method [transferableSymbol] (line 36) | get [transferableSymbol](): object {
  method [valueSymbol] (line 39) | get [valueSymbol](): object {

FILE: test/pool-destroy.test.ts
  method init (line 42) | init(asyncId, type) {
  method destroy (line 47) | destroy(asyncId) {

FILE: test/runtime.test.ts
  function createPool (line 351) | function createPool(options: Partial<Tinypool['options']>) {

FILE: test/simple.test.ts
  function addWorkerId (line 261) | function addWorkerId(workerId: number) {

FILE: test/task-queue.test.ts
  class CustomTaskPool (line 238) | class CustomTaskPool implements TaskQueue {
    method size (line 241) | get size(): number {
    method shift (line 246) | shift(): Task | null {
    method push (line 251) | push(task: Task): void {
    method remove (line 267) | remove(task: Task): void {
    method cancel (line 272) | cancel() {}
  function makeTask (line 283) | function makeTask(task: any, option: any) {

FILE: test/teardown.test.ts
  function unexpectedTeardown (line 37) | function unexpectedTeardown(message: string) {

FILE: test/worker-stdio.test.ts
  function createPool (line 32) | function createPool(options: Partial<Tinypool['options']>) {
  function captureStandardStream (line 43) | function captureStandardStream(type: 'stdout' | 'stderr') {
Condensed preview — 68 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (150K chars).
[
  {
    "path": ".clean-publish",
    "chars": 26,
    "preview": "{\n    \"cleanDocs\": true\n}\n"
  },
  {
    "path": ".github/FUNDING.yml",
    "chars": 49,
    "preview": "open_collective: aslemammad\ngithub: [aslemammad]\n"
  },
  {
    "path": ".github/dependabot.yml",
    "chars": 177,
    "preview": "version: 2\nupdates:\n  - package-ecosystem: \"npm\" # See documentation for possible values\n    directory: \"/\" # Location o"
  },
  {
    "path": ".github/workflows/benchmark.yml",
    "chars": 636,
    "preview": "on: [workflow_dispatch]\n\nname: Benchmark\n\njobs:\n  test:\n    name: Test\n    strategy:\n      fail-fast: false\n      matrix"
  },
  {
    "path": ".github/workflows/nodejs.yml",
    "chars": 771,
    "preview": "name: CI\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n  workflow_dispatch:\n\njobs:\n  test:\n    name: Test\n    "
  },
  {
    "path": ".github/workflows/publish.yml",
    "chars": 1408,
    "preview": "name: Publish\n\non:\n  workflow_dispatch:\n    inputs:\n      release-type:\n        type: choice\n        description: Type o"
  },
  {
    "path": ".github/workflows/release-commits.yml",
    "chars": 518,
    "preview": "name: Publish Any Commit\n\non: [push, pull_request]\n\njobs:\n  publish:\n    name: Publish commit\n    runs-on: ubuntu-latest"
  },
  {
    "path": ".gitignore",
    "chars": 53,
    "preview": ".nyc_output\n.vscode\n.idea\nnode_modules\ndist\ncoverage\n"
  },
  {
    "path": ".npmignore",
    "chars": 56,
    "preview": ".github\n.nyc_output\npackage-lock.json\ncoverage\nexamples\n"
  },
  {
    "path": ".prettierrc",
    "chars": 92,
    "preview": "{\n  \"endOfLine\": \"auto\",\n  \"singleQuote\": true,\n  \"semi\": false,\n  \"trailingComma\": \"es5\"\n}\n"
  },
  {
    "path": ".taprc",
    "chars": 276,
    "preview": "check-coverage: false\ncolor: true\ncoverage: true\ncoverage-report:\n  - html\n  - text\njobs: 2\nno-browser: true\ntest-env: T"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "chars": 5266,
    "preview": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nWe as members, contributors, and leaders pledge to make participa"
  },
  {
    "path": "CONTRIBUTING",
    "chars": 1236,
    "preview": "# Piscina is an OPEN Open Source Project\n\n## What?\n\nIndividuals making significant and valuable contributions are given "
  },
  {
    "path": "LICENSE",
    "chars": 1209,
    "preview": "The MIT License (MIT)\n\nCopyright (c) 2020 James M Snell and the Piscina contributors\n\nPiscina contributors listed at htt"
  },
  {
    "path": "README.md",
    "chars": 8111,
    "preview": "# Tinypool - the node.js worker pool 🧵\n\n> Piscina: A fast, efficient Node.js Worker Thread Pool implementation\n\nTinypool"
  },
  {
    "path": "benchmark/fixtures/add-process.mjs",
    "chars": 98,
    "preview": "import add from './add.mjs'\n\nprocess.on('message', (message) => {\n  process.send(add(message))\n})\n"
  },
  {
    "path": "benchmark/fixtures/add-worker.mjs",
    "chars": 161,
    "preview": "import { parentPort } from 'node:worker_threads'\n\nimport add from './add.mjs'\n\nparentPort.on('message', (message) => {\n "
  },
  {
    "path": "benchmark/fixtures/add.mjs",
    "chars": 35,
    "preview": "export default ({ a, b }) => a + b\n"
  },
  {
    "path": "benchmark/isolate-benchmark.bench.ts",
    "chars": 2152,
    "preview": "import { bench } from 'vitest'\nimport { cpus } from 'node:os'\nimport { Worker } from 'node:worker_threads'\nimport { fork"
  },
  {
    "path": "benchmark/simple.bench.ts",
    "chars": 396,
    "preview": "import { bench } from 'vitest'\nimport Tinypool from '../dist/index'\n\nbench(\n  'simple',\n  async () => {\n    const pool ="
  },
  {
    "path": "eslint.config.js",
    "chars": 2002,
    "preview": "import { readFileSync } from 'node:fs'\nimport eslint from '@eslint/js'\nimport tseslint from 'typescript-eslint'\nimport e"
  },
  {
    "path": "global.d.ts",
    "chars": 280,
    "preview": "// only for tsdown build, excluded from the final tgz\ndeclare namespace NodeJS {\n  interface Process {\n    __tinypool_st"
  },
  {
    "path": "package.json",
    "chars": 1500,
    "preview": "{\n  \"name\": \"tinypool\",\n  \"type\": \"module\",\n  \"version\": \"2.1.0\",\n  \"packageManager\": \"pnpm@9.0.6\",\n  \"description\": \"A "
  },
  {
    "path": "src/common.ts",
    "chars": 3689,
    "preview": "import type { MessagePort, TransferListItem } from 'node:worker_threads'\nimport type { SerializationType } from 'node:ch"
  },
  {
    "path": "src/entry/process.ts",
    "chars": 3202,
    "preview": "import { stderr, stdout } from '../utils'\nimport {\n  type ReadyMessage,\n  type RequestMessage,\n  type ResponseMessage,\n "
  },
  {
    "path": "src/entry/utils.ts",
    "chars": 2333,
    "preview": "import { pathToFileURL } from 'node:url'\n\n// Get `import(x)` as a function that isn't transpiled to `require(x)` by\n// T"
  },
  {
    "path": "src/entry/worker.ts",
    "chars": 5157,
    "preview": "import {\n  parentPort,\n  type MessagePort,\n  receiveMessageOnPort,\n  workerData as tinypoolData,\n} from 'node:worker_thr"
  },
  {
    "path": "src/index.ts",
    "chars": 34448,
    "preview": "import {\n  MessageChannel,\n  type MessagePort,\n  receiveMessageOnPort,\n} from 'node:worker_threads'\nimport type { Serial"
  },
  {
    "path": "src/runtime/process-worker.ts",
    "chars": 4468,
    "preview": "import { type ChildProcess, fork } from 'node:child_process'\nimport { MessagePort, type TransferListItem } from 'node:wo"
  },
  {
    "path": "src/runtime/thread-worker.ts",
    "chars": 1872,
    "preview": "import { fileURLToPath } from 'node:url'\nimport { type TransferListItem, Worker } from 'node:worker_threads'\nimport { ty"
  },
  {
    "path": "src/utils.ts",
    "chars": 373,
    "preview": "export function stdout(): NodeJS.WriteStream | undefined {\n  // @ts-expect-error Node.js maps process.stdout to console."
  },
  {
    "path": "test/async-context.test.ts",
    "chars": 1043,
    "preview": "import { createHook, executionAsyncId } from 'node:async_hooks'\nimport { Tinypool } from 'tinypool'\nimport { dirname, re"
  },
  {
    "path": "test/atomic.test.ts",
    "chars": 2785,
    "preview": "import Tinypool from 'tinypool'\nimport { dirname, resolve } from 'node:path'\nimport { fileURLToPath } from 'node:url'\n\nc"
  },
  {
    "path": "test/fixtures/child_process-communication.mjs",
    "chars": 359,
    "preview": "export default async function run(task) {\n  let resolve = () => {}\n  const promise = new Promise((r) => (resolve = r))\n\n"
  },
  {
    "path": "test/fixtures/esm-export.mjs",
    "chars": 55,
    "preview": "export default function (code) {\n  return eval(code)\n}\n"
  },
  {
    "path": "test/fixtures/eval.js",
    "chars": 55,
    "preview": "export default function (code) {\n  return eval(code)\n}\n"
  },
  {
    "path": "test/fixtures/isolated.js",
    "chars": 44,
    "preview": "let count = 0\n\nexport default () => count++\n"
  },
  {
    "path": "test/fixtures/leak-memory.js",
    "chars": 642,
    "preview": "/** Enable to see memory leak logging */\nconst logOutput = false\n\n// eslint-disable-next-line prefer-const -- intentiona"
  },
  {
    "path": "test/fixtures/move.js",
    "chars": 266,
    "preview": "import Tinypool from '../../dist/index.js'\nimport assert from 'node:assert'\nimport { types } from 'node:util'\n\nexport de"
  },
  {
    "path": "test/fixtures/multiple.js",
    "chars": 107,
    "preview": "'use strict'\n\nexport function a() {\n  return 'a'\n}\n\nexport function b() {\n  return 'b'\n}\n\nexport default a\n"
  },
  {
    "path": "test/fixtures/nested-pool.mjs",
    "chars": 427,
    "preview": "import { cpus } from 'node:os'\nimport { Tinypool } from 'tinypool'\n\nexport default async function nestedPool() {\n  const"
  },
  {
    "path": "test/fixtures/notify-then-sleep-or.js",
    "chars": 398,
    "preview": "// Set the index-th bith in i32array[0], then wait for it to be un-set again.\nexport default function ({ i32array, index"
  },
  {
    "path": "test/fixtures/resource-limits.js",
    "chars": 103,
    "preview": "'use strict'\n\nexport default () => {\n  const array = []\n  while (true) {\n    array.push([array])\n  }\n}\n"
  },
  {
    "path": "test/fixtures/simple-isworkerthread.js",
    "chars": 175,
    "preview": "import Tinypool from '../../dist/index.js'\nimport assert from 'node:assert'\n\nassert.strictEqual(Tinypool.isWorkerThread,"
  },
  {
    "path": "test/fixtures/simple-workerdata.js",
    "chars": 172,
    "preview": "import Tinypool from '../../dist/index.js'\nimport assert from 'node:assert'\n\nassert.strictEqual(Tinypool.workerData, 'AB"
  },
  {
    "path": "test/fixtures/sleep.js",
    "chars": 253,
    "preview": "import { promisify } from 'node:util'\nconst sleep = promisify(setTimeout)\n\nconst buf = new Uint32Array(new SharedArrayBu"
  },
  {
    "path": "test/fixtures/stdio.mjs",
    "chars": 114,
    "preview": "export default function run() {\n  process.stdout.write('Worker message')\n  process.stderr.write('Worker error')\n}\n"
  },
  {
    "path": "test/fixtures/teardown.mjs",
    "chars": 365,
    "preview": "import { setTimeout } from 'node:timers/promises'\n\nlet state = 0\n\n/** @type {import(\"node:worker_threads\").MessagePort }"
  },
  {
    "path": "test/fixtures/wait-for-notify.js",
    "chars": 143,
    "preview": "export default function (i32array) {\n  Atomics.wait(i32array, 0, 0)\n  Atomics.store(i32array, 0, -1)\n  Atomics.notify(i3"
  },
  {
    "path": "test/fixtures/wait-for-others.js",
    "chars": 309,
    "preview": "import { threadId } from 'node:worker_threads'\n\nexport default function ([i32array, n]) {\n  Atomics.add(i32array, 0, 1)\n"
  },
  {
    "path": "test/fixtures/workerId.js",
    "chars": 177,
    "preview": "import { workerId } from '../../dist/index.js'\n\nexport default async ({ slow }) => {\n  if (slow) {\n    await new Promise"
  },
  {
    "path": "test/globals.test.ts",
    "chars": 883,
    "preview": "import * as path from 'node:path'\nimport { fileURLToPath } from 'node:url'\nimport { Tinypool } from 'tinypool'\n\nconst __"
  },
  {
    "path": "test/idle-timeout.test.ts",
    "chars": 1248,
    "preview": "import { promisify } from 'node:util'\nimport { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\ni"
  },
  {
    "path": "test/isolation.test.ts",
    "chars": 2544,
    "preview": "import { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url"
  },
  {
    "path": "test/move.test.ts",
    "chars": 3670,
    "preview": "import { Tinypool, isMovable, markMovable, isTransferable } from 'tinypool'\nimport { types } from 'node:util'\nimport { M"
  },
  {
    "path": "test/options.test.ts",
    "chars": 1471,
    "preview": "import { expect, test, vi } from 'vitest'\n\nlet Tinypool: typeof import('tinypool').default\nconst cpuCount = vi.hoisted(("
  },
  {
    "path": "test/pool-destroy.test.ts",
    "chars": 1728,
    "preview": "import { createHook } from 'node:async_hooks'\nimport { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tin"
  },
  {
    "path": "test/resource-limits.test.ts",
    "chars": 3118,
    "preview": "import { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url"
  },
  {
    "path": "test/runtime.test.ts",
    "chars": 10315,
    "preview": "import EventEmitter from 'node:events'\nimport * as path from 'node:path'\nimport { fileURLToPath } from 'node:url'\nimport"
  },
  {
    "path": "test/simple.test.ts",
    "chars": 8955,
    "preview": "import EventEmitter from 'node:events'\nimport { cpus } from 'node:os'\nimport { dirname, resolve } from 'node:path'\nimpor"
  },
  {
    "path": "test/task-queue.test.ts",
    "chars": 8709,
    "preview": "import { dirname, resolve } from 'node:path'\nimport { Tinypool, type Task, type TaskQueue } from 'tinypool'\nimport { fil"
  },
  {
    "path": "test/teardown.test.ts",
    "chars": 1816,
    "preview": "import { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url"
  },
  {
    "path": "test/termination.test.ts",
    "chars": 1874,
    "preview": "import { dirname, resolve } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'node:url"
  },
  {
    "path": "test/uncaught-exception-from-handler.test.ts",
    "chars": 2541,
    "preview": "import { dirname, resolve, sep } from 'node:path'\nimport { Tinypool } from 'tinypool'\nimport { fileURLToPath } from 'nod"
  },
  {
    "path": "test/worker-stdio.test.ts",
    "chars": 1413,
    "preview": "import * as path from 'node:path'\nimport { fileURLToPath } from 'node:url'\nimport { stripVTControlCharacters } from 'nod"
  },
  {
    "path": "tsconfig.json",
    "chars": 665,
    "preview": "{\n  \"compilerOptions\": {\n    \"target\": \"ESNext\",\n    \"module\": \"ESNext\",\n    \"strict\": true,\n    \"moduleResolution\": \"Bu"
  },
  {
    "path": "tsdown.config.ts",
    "chars": 117,
    "preview": "import { defineConfig } from 'tsdown'\n\nexport default defineConfig({\n  entry: ['src/index.ts', 'src/entry/*.ts'],\n})\n"
  },
  {
    "path": "vitest.config.ts",
    "chars": 431,
    "preview": "import { dirname, resolve } from 'node:path'\nimport { defineConfig } from 'vitest/config'\nimport { fileURLToPath } from "
  }
]

About this extraction

This page contains the full source code of the Aslemammad/tinypool GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 68 files (138.2 KB), approximately 37.5k tokens, and a symbol index with 193 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!