[
  {
    "path": ".babelrc",
    "content": "{\n    \"plugins\": [\n        \"@babel/proposal-object-rest-spread\",\n        [\"@babel/transform-react-jsx\", {\n            \"pragma\": \"createAudioElement\"\n        }]\n    ],\n    \"env\": {\n        \"test\": {\n            \"plugins\": [\n                \"@babel/transform-modules-commonjs\"\n            ]\n        }\n    }\n}\n"
  },
  {
    "path": ".editorconfig",
    "content": "root = true\n\n# Unix-style newlines with a newline ending every file\n[*]\nend_of_line = lf\ninsert_final_newline = true\n\n# Matches multiple files with brace expansion notation\n# Set default charset\n[*.{js,jsx,html,scss}]\ncharset = utf-8\nindent_style = space\nindent_size = 4\ntrim_trailing_whitespace = true\n"
  },
  {
    "path": ".eslintrc.js",
    "content": "module.exports = {\n    \"globals\": {\n        \"onAudioContextResumed\": \"readable\",\n    },\n    \"env\": {\n        \"browser\": true,\n        \"es6\": true,\n        \"jest\": true,\n        \"node\": true,\n    },\n    \"extends\": \"eslint:recommended\",\n    \"parserOptions\": {\n        \"ecmaFeatures\": {\n            \"jsx\": true\n        },\n        \"ecmaVersion\": 2018,\n        \"sourceType\": \"module\"\n    },\n    \"plugins\": [\n        \"react\"\n    ],\n    \"settings\": {\n        \"react\": {\n            \"pragma\": \"createAudioElement\"\n        }\n    },\n    \"rules\": {\n        \"indent\": [\n            \"error\",\n            4\n        ],\n        \"linebreak-style\": [\n            \"error\",\n            \"unix\"\n        ],\n        \"quotes\": [\n            \"error\",\n            \"single\"\n        ],\n        \"semi\": [\n            \"error\",\n            \"always\"\n        ],\n        \"react/jsx-uses-react\": 1,\n        \"react/jsx-uses-vars\": 1,\n    }\n};\n"
  },
  {
    "path": ".gitignore",
    "content": "dist\nnode_modules\ncoverage\n.nyc_output\n.DS_Store"
  },
  {
    "path": ".npmignore",
    "content": "src\nexample\n.babelrc\n.eslintignore\n.eslintrc.js\n.nvmrc\nREADME.md\nrollup.config.js\n.travis.yml\n"
  },
  {
    "path": ".nvmrc",
    "content": "v8.11.0\n"
  },
  {
    "path": ".travis.yml",
    "content": "language: node_js\nnode_js:\n  - \"8\"\n  - \"10\"\n\nscript: npm run test:ci\n"
  },
  {
    "path": "README.md",
    "content": "# Wax\n\n[![Travis CI status](https://api.travis-ci.org/jamesseanwright/wax.svg?branch=master)](https://travis-ci.org/jamesseanwright/wax) [![Code coverage status](https://coveralls.io/repos/github/jamesseanwright/wax/badge.svg?branch=master)](https://coveralls.io/github/jamesseanwright/wax?branch=master) [![npm version](https://badge.fury.io/js/wax-core.svg)](https://www.npmjs.com/package/wax-core)\n\nAn experimental, JSX-compatible renderer for the Web Audio API. I wrote Wax for my [Manchester Web Meetup](https://www.meetup.com/Manchester-Web-Meetup) talk, [_Manipulating the Web Audio API with JSX and Custom Renderers_](https://www.youtube.com/watch?v=IeuuBKBb4Wg).\n\nWhile it has decent test coverage and is stable, I still deem this to be a work-in-progress. **Use in production at your own risk!**\n\n```jsx\n/** @jsx createAudioElement */\n\nimport {\n    createAudioElement,\n    renderAudioGraph,\n    AudioGraph,\n    Oscillator,\n    Gain,\n    StereoPanner,\n    Destination,\n    setValueAtTime,\n    exponentialRampToValueAtTime,\n} from 'wax-core';\n\nrenderAudioGraph(\n    <AudioGraph>\n        <Oscillator\n            frequency={[\n                setValueAtTime(200, 0),\n                exponentialRampToValueAtTime(800, 3),\n            ]}\n            type=\"square\"\n            endTime={3}\n        />\n        <Gain gain={0.2} />\n        <StereoPanner pan={-1} />\n        <Destination />\n    </AudioGraph>\n);\n```\n\n## Example Apps\n\nConsult the [example](https://github.com/jamesseanwright/wax/tree/master/example) directory for a few small example apps that use Wax. 
The included [`README`](https://github.com/jamesseanwright/wax/blob/master/example/README.md) summarises them and details how they can be built and run.\n\n## Documentation\n\n* [Introduction](https://github.com/jamesseanwright/wax/blob/master/docs/000-introduction.md)\n* [Getting Started](https://github.com/jamesseanwright/wax/blob/master/docs/001-getting-started.md)\n* [Manipulating Audio Parameters](https://github.com/jamesseanwright/wax/blob/master/docs/002-audio-parameters.md)\n* [Building Complex Graphs with `<Aggregation />`s](https://github.com/jamesseanwright/wax/blob/master/docs/003-aggregations.md)\n* [Updating Rendered `<AudioGraph />`s](https://github.com/jamesseanwright/wax/blob/master/docs/004-updating-audio-graphs.md)\n* [Interop with React](https://github.com/jamesseanwright/wax/blob/master/docs/005-interop-with-react.md)\n* [API Reference](https://github.com/jamesseanwright/wax/blob/master/docs/006-api-reference.md)\n* [Local Development](https://github.com/jamesseanwright/wax/blob/master/docs/007-local-development.md)\n"
  },
  {
    "path": "docs/000-introduction.md",
    "content": "# Introduction\n\n[Web Audio](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) is an exciting capability that allows developers to generate and manipulate sound in real-time (and to [render it for later use](https://developer.mozilla.org/en-US/docs/Web/API/OfflineAudioContext)), requiring nothing beyond JavaScript and a built-in browser API. Its audio graph model is conceptually logical, but writing imperative connection code can prove tedious, especially for larger graphs:\n\n```js\noscillator.connect(gain);\ngain.connect(stereoPanner);\nbufferSource.connect(stereoPanner);\nstereoPanner.connect(context.destination);\n```\n\n[There are ways of mitigating this \"fatigue\"](https://github.com/learnable-content/web-audio-api-mini-course/blob/lesson1.3/complete/index.js#L66), but what if we could declare our audio graph, and its components, as a tree of elements using [JSX](https://reactjs.org/docs/introducing-jsx.html)? Can we thus avoid directly specifying this connection code? 
Wax is an attempt at answering these questions.\n\nTake the example found in the main README:\n\n```jsx\nrenderAudioGraph(\n    <AudioGraph>\n        <Oscillator\n            frequency={[\n                setValueAtTime(200, 0),\n                exponentialRampToValueAtTime(800, 3),\n            ]}\n            type=\"square\"\n            endTime={3}\n        />\n        <Gain gain={0.2} />\n        <StereoPanner pan={-1} />\n        <Destination />\n    </AudioGraph>\n);\n```\n\nThis is analogous to:\n\n```js\nconst context = new AudioContext();\nconst oscillator = context.createOscillator();\nconst gain = context.createGain();\nconst stereoPanner = context.createStereoPanner();\nconst getTime = time => context.currentTime + time;\n\noscillator.type = 'square';\noscillator.frequency.value = 200;\noscillator.frequency.exponentialRampToValueAtTime(800, getTime(3));\ngain.gain.value = 0.2;\nstereoPanner.pan.value = -0.2\n\noscillator.connect(gain);\ngain.connect(stereoPanner);\nstereoPanner.connect(context.destination);\n\noscillator.start();\noscillator.stop(getTime(3));\n```\n\nAs React abstracts manual, imperative DOM operations, Wax abstracts manual, imperative Web Audio operations.\n\nBut how does Wax connect these nodes? The children of the root `<AudioGraph />` element **will be connected to one another in the order in which they're declared**. In our case:\n\n1. `<Oscillator />` will be rendered and connected to the rendered `<Gain />`\n2. `<Gain />` will be connected to `<StereoPanner />`\n3. `<StereoPanner />` will be connected to `<Destination />` (`Destination` is a convenience component to consistently handle connections to the audio context's `destination` node)\n"
  },
  {
    "path": "docs/001-getting-started.md",
    "content": "# Getting Started\n\nThe entirety of Wax is available in a single package from npm, named `wax-core`. Install it into your project with:\n\n```shell\nnpm i --save wax-core\n```\n\nCreate a single entry point, `simple.jsx`, and replicate the following imports and audio graph.\n\n```js\nimport {\n    createAudioElement,\n    renderAudioGraph,\n    AudioGraph,\n    Oscillator,\n    Gain,\n    StereoPanner,\n    Destination,\n    setValueAtTime,\n    exponentialRampToValueAtTime,\n} from 'wax-core';\n\nrenderAudioGraph(\n    <AudioGraph>\n        <Oscillator\n            frequency={[\n                setValueAtTime(200, 0),\n                exponentialRampToValueAtTime(800, 3),\n            ]}\n            type=\"square\"\n            endTime={3}\n        />\n        <Gain gain={0.2} />\n        <StereoPanner pan={-0.2} />\n        <Destination />\n    </AudioGraph>\n);\n```\n\nWhile `<AudioGraph />` does nothing special at present, it may manipulate its children in future versions of Wax. Please ensure you always specify an `AudioGraph` as the root element of the tree.\n\nBut how do we actually build this? How can we instruct a transpiler that these JSX constructs should specifically target Wax? Firstly, let's look at the first binding we import from `wax-core`:\n\n```js\nimport {\n    createAudioElement,\n    ...\n} from 'wax-core';\n```\n\nWhy are we importing this if we aren't calling it anywhere? Oh, but we are; when our JSX is transpiled, it'll resolve to invocations of `createAudioElement`. 
It is the Wax equivalent of `React.createElement`, and follows the exact same signature!\n\n```js\nrenderAudioGraph(\n    createAudioElement(\n        AudioGraph,\n        null,\n        createAudioElement(\n            Oscillator,\n            {\n                frequency: [\n                    setValueAtTime(200, 0),\n                    exponentialRampToValueAtTime(800, 3)\n                ],\n                type: 'square',\n                endTime: 3,\n            },\n        ),\n        createAudioElement(Gain, { gain: 0.2 }),\n        createAudioElement(StereoPanner, { pan: -0.2 }),\n        createAudioElement(Destination, null),\n    )\n);\n```\n\nTo achieve this transformation, we can use [Babel](https://babeljs.io) and the [`transform-react-jsx`](https://babeljs.io/docs/en/babel-plugin-transform-react-jsx) plugin; the latter exposes a `pragma` option that we can configure to transform JSX to `createAudioElement` calls:\n\n```json\n{\n    \"plugins\": [\n        [\"@babel/transform-react-jsx\", {\n            \"pragma\": \"createAudioElement\"\n        }]\n    ]\n}\n```\n\nDespite the name, this plugin performs general JSX transformations, defaulting to `React.createElement`. You do not need React to use Wax!\n\nTo create a bundle containing Wax and our app's code, we'll need a build tool **that supports ES Modules**. 
For the example apps, we use [Rollup](https://rollupjs.org/) and [`rollup-plugin-babel`](https://github.com/rollup/rollup-plugin-babel) to respect JSX transpilation ([config](https://github.com/jamesseanwright/wax/blob/master/rollup.config.js)).\n\nOnce we have our bundle, we can load it into an HTML document using a `<script>` element:\n\n```html\n<script src=\"/index.js\"></script>\n```\n\n## `createAudioElement` and ESLint\n\nIf you are using ESLint to analyse your code, you may receive this error:\n\n```\n'createAudioElement' is defined but never used.\n```\n\nThis is because `eslint-plugin-react` expects the pragma to be `React.createElement`. To suppress this error, one should explicitly configure the React plugin in the ESLint config's `settings` property:\n\n```json\n{\n    \"settings\": {\n        \"react\": {\n            \"pragma\": \"createAudioElement\"\n        }\n    }\n}\n```\n\nAlternatively, one can specify an `@jsx` directive at the beginning of your module:\n\n```js\n/** @jsx createAudioElement */\n```\n"
  },
  {
    "path": "docs/002-audio-parameters.md",
    "content": "# Manipulating Audio Parameters\n\nThe [`AudioParam`](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam) interface provides a means of changing audio properties, such as `OscillatorNode.prototype.frequency` and `GainNode.prototype.gain`, via direct values or scheduled events.\n\nLooking at our app, we can observe that a few parameter changes will occur:\n\n```jsx\n<Oscillator\n    frequency={[\n        setValueAtTime(200, 0),\n        exponentialRampToValueAtTime(800, 3),\n    ]}\n    type=\"square\"\n    endTime={3}\n/>\n<Gain gain={0.2} />\n<StereoPanner pan={-0.2} />\n```\n\n* `<Oscillator />`'s frequency will immediately be set to 200 Hz, then ramped to 800 Hz over a duration of 3 seconds\n\n* `<Gain />`'s gain will be a constant value of `0.2`\n\n* `<StereoPanner />`'s pan will be a constant value of `-0.2`\n\nWith this in mind, Wax components support param changes with various props, whose values can be:\n\n* a single, constant value\n* a single parameter mutation e.g. `frequency={setValueAtTime(200, 0)}`\n* an array of parameter mutations (as above), which will be applied in order of declaration\n\n## What Are Parameter Mutations?\n\nParameter mutations are functions that conform to those exposed by the `AudioParam` interface; if an audio parameter supports it, then Wax will export a mutation for it! All of them are exported for consumption, but to list them for transparency:\n\n* `setValueAtTime`\n* `linearRampToValueAtTime`\n* `exponentialRampToValueAtTime`\n* `setTargetAtTime`\n* `setValueCurveAtTime`\n"
  },
  {
    "path": "docs/003-aggregations.md",
    "content": "# Building Complex Graphs with `<Aggregation />`s\n\nThus far, we have built a simple, linear audio graph. What if we want to build more complex graphs in which multiple sources connect to common nodes? In Wax, we can achieve this with the `<Aggregation />` component.\n\nSay we wish to build the following graph:\n\n![A more complex audio graph, with two separate nodes connecting to a single StereoPannerNode](https://raw.githubusercontent.com/jamesseanwright/wax/master/docs/images/complex-graph.png)\n\nThe Web Audio API is built for this, demonstrating how the audio node model is a pragmatic fit:\n\n```js\n// Node instantiation code assumed\noscillator.connect(gain);\ngain.connect(stereoPanner);\nbufferSource.connect(stereoPanner);\nstereoPanner.connect(context.destination);\n```\n\nGreat! But how can we achieve this with Wax? With the `<Aggregation />` component, that's how:\n\n```js\nimport {\n    Aggregation,\n    ...\n} from 'wax-core';\n\n// [...]\n\nconst yodel = await fetchAsAudioBuffer('/yodel.mp3', audioContext);\nconst stereoPanner = <StereoPanner pan={0.4} />;\n\nrenderAudioGraph(\n    <AudioGraph>\n        <Aggregation>\n            <Oscillator\n                frequency={[\n                    setValueAtTime(200, 0),\n                    exponentialRampToValueAtTime(800, 3),\n                ]}\n                type=\"square\"\n                endTime={3}\n            />\n            <Gain gain={0.1} />\n            {stereoPanner}\n        </Aggregation>\n        <Aggregation>\n            <AudioBufferSource\n                buffer={yodel}\n            />\n            {stereoPanner}\n        </Aggregation>\n        {stereoPanner}\n        <Destination />\n    </AudioGraph>,\n    audioContext,\n);\n```\n\nYou can think of an aggregation as a nestable audio graph; it will connect its children sequentially. 
When the root `<AudioGraph />` is rendered, any inner `<Aggregation />` elements will be respected, avoiding double rendering and connection issues.\n\nTypically, you'll declare a shared element in a single place, to which the other children of an aggregation can connect; said shared element can then be specified again in the main audio graph to ensure it is ultimately connected, directly or indirectly, to the destination node.\n\nLet's clarify this within the above example. We declare a single `<StereoPanner />` element, which will create a single `StereoPannerNode` to which a `GainNode` and an `AudioBufferSourceNode` will respectively connect. Outside of the two `<Aggregation />` elements, we specify the same element instance again in the root audio graph, so that it will be connected to `audioContext.destination`. As we can reuse existing elements by declaration name within a set of curly braces in React, we can achieve the same in Wax; to summarise, these sharable elements can be used within inner aggregations and the root audio graph to generate more complex sounds.\n"
  },
  {
    "path": "docs/004-updating-audio-graphs.md",
    "content": "# Updating Rendered `<AudioGraph />`s\n\nThus far, we have been rendering static audio graphs with the `renderAudioGraph` function. Say we now want to update a graph that creates an oscillator, whose frequency is dictated by a slider (`<input type=\"range\" />`):\n\n```js\nimport {\n    renderAudioGraph,\n    ...\n} from 'wax-core';\n\nconst slider = document.body.querySelector('#slider');\n\nslider.addEventListener('change', ({ target }) => {\n    renderAudioGraph(\n        <AudioGraph>\n            <Oscillator frequency={target.value} />\n        </AudioGraph>\n    );\n});\n```\n\nThe problem with this approach is that we'll be creating a new audio node for each element whenever the slider's value changes! While `AudioNode`s are cheap to create, the above code will result in many frequencies being played at once; try at your own peril, but I can assure you that it sounds horrible.\n\nTo update a tree that already exists, one can replace `renderAudioGraph` with `renderPersistentAudioGraph`:\n\n```js\nimport {\n    renderPersistentAudioGraph,\n    ...\n} from 'wax-core';\n\nconst slider = document.body.querySelector('#slider');\nlet value = 40;\n\nslider.value = value;\n\nconst audioGraph = (\n    <AudioGraph>\n        <Oscillator frequency={value} />\n    </AudioGraph>\n);\n\nconst updateAudioGraph = renderPersistentAudioGraph(audioGraph);\n\nslider.addEventListener('change', ({ target }) => {\n    value = target.value;\n    updateAudioGraph(audioGraph);\n});\n```\n\nBy invoking the `updateAudioGraph` function returned by calling `renderPersistentAudioGraph`, we can update our existing tree of audio elements to reflect the latest property values; this internally reinvokes component logic across the tree, but against the already-created nodes. 
It's analogous to React's [reconciliation](https://reactjs.org/docs/reconciliation.html) algorithm, albeit infinitely less sophisticated.\n\n## A Note on \"Reconciliation\"\n\nAt present, Wax will not diff element trees between renders to determine if nodes have been added or removed; it assumes that their structures are identical, and that only respective properties have changed. This is certainly a big limitation and will be addressed properly if this project evolves from its experimental stage; for the time being, conditionally specifying elements will not work:\n\n```js\n<AudioGraph>\n    {/* This won't work... yet. */}\n    {makeNoise && <Oscillator frequency={frequency} />}\n</AudioGraph>\n```\n"
  },
  {
    "path": "docs/005-interop-with-react.md",
    "content": "# Interop with React\n\nIn the prior chapter, we learned how to update an existing audio graph whenever a `HTMLInputElement` fires a `change` event. Can we handle the visual UI with React while supporting JSX-declared audio graphs with Wax?\n\nThe problem is that we need to be able to use `React.createElement` and `createAudioElement` at the same time. What if we could compose a single pragma that can select whether to use the former or the latter at runtime? The [`withReact` example app](https://github.com/jamesseanwright/wax/blob/master/example/src/withReact.jsx) has a solution:\n\n```js\n/** @jsx createElement */\n\nimport {\n    isWaxComponent,\n    ...\n} from 'wax-core';\n\nimport combineElementCreators from './combineElementCreators';\n\nconst createElement = combineElementCreators(\n    [isWaxComponent, createAudioElement],\n    [() => true, React.createElement],\n);\n```\n\n`combineElementCreators` is a function that takes a mapping between predicates and pragmas, and returns a new pragma to be targeted by our transpiler. In our example, if an element belongs to Wax (determined using the exposed `isWaxComponent` binding), then the `createAudioElement` pragma will be invoked; otherwise, we'll default to `React.createElement`. 
`combineElementCreators` isn't provided by Wax but can be implemented with a few lines of code:\n\n```js\nconst getCreator = (map, Component) =>\n    [...map.entries()]\n        .find(([predicate]) => predicate(Component))[1];\n\nconst combineElementCreators = (...creatorBindings) => {\n    const map = new Map(creatorBindings);\n\n    return (Component, props, ...children) => {\n        const creator = getCreator(map, Component);\n        return creator(Component, props, ...children);\n    };\n};\n```\n\nWe can then instruct Babel to target this pragma in the usual way:\n\n```json\n{\n    \"presets\": [\n        [\"@babel/react\", {\n            \"pragma\": \"createElement\"\n        }]\n    ]\n}\n```\n\nThe aforementioned `withReact` example demonstrates how `ReactDOM.render` and `renderPersistentAudioGraph` can be used across a single app.\n"
  },
  {
    "path": "docs/006-api-reference.md",
    "content": "# API Reference\n\nComing soon. I'm going to find a nice way of autogenerating this!\n"
  },
  {
    "path": "docs/007-local-development.md",
    "content": "# Local Development\n\nTo build Wax and to run the example apps locally, you'll first need to run these commands in your terminal\n\n```shell\ngit clone https://github.com/jamesseanwright/wax.git # or fork and use SSH if submitting a PR\ncd wax\nnpm i\n```\n\nThen you can run one of the following scripts:\n\n* `npm run build` - builds the library and outputs it to the `dist` dir, ready for publishing\n* `npm run build-example` - builds the example app specified in the `ENTRY` environment variable, defaulting to `simple`\n* `npm run dev` - builds the library, then builds and runs the example app specified in the `ENTRY` environment variable, defaulting to `simple`\n* `npm test` - lints the source code (including `example`) and runs the unit tests. Append ` -- --watch` to enter Jest's watch mode\n\nFor more information on the example apps, consult the [README in the `example` folder](https://github.com/jamesseanwright/wax/blob/master/example/README.md).\n"
  },
  {
    "path": "example/README.md",
    "content": "# Example Apps\n\nThe `src` directory contains three example Wax applications, each bootstrapped by the same HTML document (index.html):\n\n* `simple.jsx` - declares an oscillator, with a couple of scheduled frequency changes, whose gain and pan are altered\n\n* `aggregation.jsx` - demonstrates how one can use the `<Aggregation />` component to build more complex audio graphs\n\n* `withReact.jsx` - renders a slider element using React, which updates an `<AudioGraph />` whenever its value is changed. This calls `renderPersistentAudioGraph()`\n\n## Running the Examples\n\nAfter following the setup guide in the [local development documentation](https://github.com/jamesseanwright/wax/blob/master/docs/007-local-development.md), run `npm run dev` from the root of the repository. You can specify the `ENTRY` environment variable to select which app to run; this is the name of the app **without** the `.jsx` extension e.g. `ENTRY=withReact npm run dev`. If omitted, `simple` will be built and started.\n\nThe example apps are [built using rollup](https://github.com/jamesseanwright/wax/blob/master/rollup.config.js).\n\n## Mitigating Chrome's Autoplay Policy\n\nAs of December 2018, Chrome instantiates `AudioContext`s in the `'suspended'` state, [requiring an explicit user interaction before they can be resumed](https://developers.google.com/web/updates/2017/09/autoplay-policy-changes#webaudio). To mitigate this, there is some [JavaScript in the HTML bootstrapper (index.html) which will create, resume, and forward a context to the current app](https://github.com/jamesseanwright/wax/blob/master/example/src/index.html#L14) via a callback. This will potentially become a requirement for other browsers as they adopt similar policies in the future.\n"
  },
  {
    "path": "example/devServer.js",
    "content": "'use strict';\n\n/* node-static unfortunately doesn't provide\n * the correct Content-Type header for non-HTML\n * files, breaking the decoding of yodel.mp3.\n * Thus, I'm overriding this via this abstraction */\n\nconst PORT = 8080;\nconst DEFAULT_MIME_TYPE = 'application/html';\n\nconst http = require('http');\nconst path = require('path');\nconst url = require('url');\nconst nodeStatic = require('node-static');\n\nconst file = new nodeStatic.Server(\n    path.join(__dirname, 'dist'),\n    { cache: 0 },\n);\n\nconst contentTypes = new Map([\n    [/.*\\.mp3$/ig, 'audio/mp3'],\n]);\n\nconst getContentType = req => {\n    const { pathname } = url.parse(req.url);\n\n    for (let [expression, mimeType] of contentTypes) {\n        if (expression.test(pathname)) {\n            return mimeType;\n        }\n    }\n\n    return DEFAULT_MIME_TYPE;\n};\n\nconst server = http.createServer((req, res) => {\n    req.on('end', () => {\n        res.setHeader('Content-Type', getContentType(req));\n        file.serve(req, res);\n    }).resume();\n});\n\n// eslint-disable-next-line no-console\nserver.listen(PORT, () => console.log('Dev server listening on', PORT));\n"
  },
  {
    "path": "example/src/aggregation.jsx",
    "content": "import {\n    createAudioElement,\n    renderAudioGraph,\n    AudioGraph,\n    Aggregation,\n    AudioBufferSource,\n    Oscillator,\n    Gain,\n    StereoPanner,\n    Destination,\n    setValueAtTime,\n    exponentialRampToValueAtTime,\n} from 'wax-core';\n\nconst fetchAsAudioBuffer = async (url, audioContext) => {\n    const response = await fetch(url);\n    const arrayBuffer = await response.arrayBuffer();\n    return await audioContext.decodeAudioData(arrayBuffer);\n};\n\nonAudioContextResumed(async context => {\n    const yodel = await fetchAsAudioBuffer('/yodel.mp3', context);\n    const stereoPanner = <StereoPanner pan={0.4} />;\n\n    renderAudioGraph(\n        <AudioGraph>\n            <Aggregation>\n                <Oscillator\n                    frequency={[\n                        setValueAtTime(200, 0),\n                        exponentialRampToValueAtTime(800, 3),\n                    ]}\n                    type=\"square\"\n                    endTime={3}\n                />\n                <Gain gain={0.1} />\n                {stereoPanner}\n            </Aggregation>\n            <Aggregation>\n                <AudioBufferSource\n                    buffer={yodel}\n                />\n                <Gain gain={1.4} />\n                {stereoPanner}\n            </Aggregation>\n            {stereoPanner}\n            <Destination />\n        </AudioGraph>,\n        context,\n    );\n});\n"
  },
  {
    "path": "example/src/combineElementCreators.js",
    "content": "const getCreator = (map, Component) =>\n    [...map.entries()]\n        .find(([predicate]) => predicate(Component))[1];\n\nconst combineElementCreators = (...creatorBindings) => {\n    const map = new Map(creatorBindings);\n\n    return (Component, props, ...children) => {\n        const creator = getCreator(map, Component);\n        return creator(Component, props, ...children);\n    };\n};\n\nexport default combineElementCreators;\n"
  },
  {
    "path": "example/src/index.html",
    "content": "<!DOCTYPE html>\n<html>\n    <head>\n        <meta charset=\"utf-8\" />\n        <title>Wax Example</title>\n    </head>\n    <body>\n        <main>\n            <h1>Wax Example</h1>\n            <p>Turn your speakers up!</p>\n            <button id=\"start\">Resume audio context and start!</button>\n            <section id=\"react-target\"></section>\n            <script>\n                'use strict';\n\n                /* Workaround for Chrome's Web Audio autoplay\n                 * policy. See the README for more info. */\n\n                const button = document.body.querySelector('#start');\n                const audioContext = new AudioContext();\n                let resumptionCallback = () => undefined;\n\n                const onAudioContextResumed = callback => {\n                    resumptionCallback = callback;\n                };\n\n                button.onclick = async () => {\n                    await audioContext.resume();\n                    resumptionCallback(audioContext);\n                };\n            </script>\n            <script src=\"/index.js\"></script>\n        </main>\n    </body>\n</html>\n"
  },
  {
    "path": "example/src/simple.jsx",
    "content": "import {\n    createAudioElement,\n    renderAudioGraph,\n    AudioGraph,\n    Oscillator,\n    Gain,\n    StereoPanner,\n    Destination,\n    setValueAtTime,\n    exponentialRampToValueAtTime,\n} from 'wax-core';\n\nonAudioContextResumed(context => {\n    renderAudioGraph(\n        <AudioGraph>\n            <Oscillator\n                frequency={[\n                    setValueAtTime(200, 0),\n                    exponentialRampToValueAtTime(800, 3),\n                ]}\n                type=\"square\"\n                endTime={3}\n            />\n            <Gain gain={0.2} />\n            <StereoPanner pan={-0.2} />\n            <Destination />\n        </AudioGraph>,\n        context,\n    );\n});\n"
  },
  {
    "path": "example/src/withReact.jsx",
    "content": "/** @jsx createElement */\n\nimport React from 'react';\nimport ReactDOM from 'react-dom';\n\nimport {\n    isWaxComponent,\n    createAudioElement,\n    renderPersistentAudioGraph,\n    AudioGraph,\n    Oscillator,\n    Gain,\n    Destination,\n} from 'wax-core';\n\nimport combineElementCreators from './combineElementCreators';\n\nconst createElement = combineElementCreators(\n    [isWaxComponent, createAudioElement],\n    [() => true, React.createElement],\n);\n\nclass Slider extends React.Component {\n    constructor(props) {\n        super(props);\n        this.onChange = this.onChange.bind(this);\n    }\n\n    componentDidMount() {\n        const { children, min, audioContext } = this.props;\n\n        this.updateAudioGraph = renderPersistentAudioGraph(\n            children(min),\n            audioContext,\n        );\n    }\n\n    onChange({ target }) {\n        this.updateAudioGraph(\n            this.props.children(target.value),\n        );\n    }\n\n    render() {\n        return (\n            <input\n                type=\"range\"\n                min={this.props.min}\n                max={this.props.max}\n                onChange={this.onChange}\n            />\n        );\n    }\n}\n\nonAudioContextResumed(context => {\n    ReactDOM.render(\n        <Slider\n            audioContext={context}\n            min={40}\n            max={800}\n        >\n            {value =>\n                <AudioGraph>\n                    <Oscillator\n                        frequency={value}\n                        type=\"square\"\n                    />\n                    <Gain gain={0.2} />\n                    <Destination />\n                </AudioGraph>\n            }\n        </Slider>,\n        document.querySelector('#react-target'),\n    );\n});\n"
  },
  {
    "path": "jest.config.js",
    "content": "module.exports = {\n    testRegex: 'src\\\\/(.*\\\\/)*__tests__\\\\/.*\\\\.test\\\\.jsx?$',\n    transform: {\n        '.*\\\\.jsx?$': 'babel-jest',\n    },\n};\n"
  },
  {
    "path": "package.json",
    "content": "{\n  \"name\": \"wax-core\",\n  \"version\": \"0.1.1\",\n  \"description\": \"An experimental, JSX-compatible renderer for the Web Audio API\",\n  \"main\": \"dist/index.js\",\n  \"scripts\": {\n    \"build\": \"babel src --out-dir dist\",\n    \"build-example\": \"mkdir -p example/dist && rm -rf example/dist/* && rollup -c && bash -c 'cp -r example/src/*.{html,mp3} example/dist'\",\n    \"dev\": \"npm run build && npm run build-example && node example/devServer\",\n    \"test\": \"eslint --ext js --ext jsx src example/src && jest\",\n    \"test:ci\": \"npm test -- --coverage --coverageReporters=text-lcov | coveralls\"\n  },\n  \"repository\": {\n    \"type\": \"git\",\n    \"url\": \"git+https://github.com/jamesseanwright/wax.git\"\n  },\n  \"keywords\": [\n    \"web\",\n    \"audio\",\n    \"api\",\n    \"components\",\n    \"jsx\",\n    \"react\"\n  ],\n  \"author\": \"James Wright <james@jamesswright.co.uk>\",\n  \"license\": \"ISC\",\n  \"bugs\": {\n    \"url\": \"https://github.com/jamesseanwright/wax/issues\"\n  },\n  \"homepage\": \"https://github.com/jamesseanwright/wax#readme\",\n  \"devDependencies\": {\n    \"@babel/cli\": \"7.0.0\",\n    \"@babel/core\": \"7.0.0\",\n    \"@babel/plugin-proposal-object-rest-spread\": \"7.0.0\",\n    \"@babel/plugin-transform-modules-commonjs\": \"7.1.0\",\n    \"@babel/plugin-transform-react-jsx\": \"7.0.0\",\n    \"@babel/preset-react\": \"7.0.0\",\n    \"babel-core\": \"7.0.0-bridge.0\",\n    \"babel-jest\": \"23.6.0\",\n    \"babel-plugin-transform-inline-environment-variables\": \"0.4.3\",\n    \"coveralls\": \"3.0.2\",\n    \"eslint\": \"5.5.0\",\n    \"eslint-plugin-react\": \"7.11.1\",\n    \"jest\": \"23.6.0\",\n    \"node-static\": \"0.7.10\",\n    \"nyc\": \"^13.0.1\",\n    \"react\": \"16.5.0\",\n    \"react-dom\": \"16.5.0\",\n    \"remove\": \"^0.1.5\",\n    \"rollup\": \"0.65.0\",\n    \"rollup-plugin-alias\": \"1.4.0\",\n    \"rollup-plugin-babel\": \"4.0.2\",\n    
\"rollup-plugin-commonjs\": \"9.1.6\",\n    \"rollup-plugin-node-resolve\": \"3.3.0\"\n  }\n}\n"
  },
  {
    "path": "rollup.config.js",
    "content": "/* TODO: move this and dependencies to\n * separate package in example directory?\n */\n\nimport { resolve as resolvePath } from 'path';\nimport alias from 'rollup-plugin-alias';\nimport commonjs from 'rollup-plugin-commonjs';\nimport resolve from 'rollup-plugin-node-resolve';\nimport babel from 'rollup-plugin-babel';\n\nconst entry = process.env.ENTRY || 'simple';\n\nexport default {\n    input: `example/src/${entry}.jsx`,\n    output: {\n        file: 'example/dist/index.js',\n        format: 'iife',\n    },\n    plugins: [\n        resolve(),\n        commonjs(), // for React and ReactDOM\n        alias({\n            'wax-core': resolvePath(__dirname, 'dist', 'index.js'),\n        }),\n        babel(\n            entry === 'withReact'\n                && {\n                    babelrc: false,\n                    presets: [\n                        ['@babel/react', {\n                            pragma: 'createElement',\n                            pragmaFrag: 'React.Fragment',\n                        }],\n                    ],\n                    plugins: ['transform-inline-environment-variables'],\n                }\n        ),\n    ],\n};\n"
  },
  {
    "path": "src/__tests__/connectNodes.test.js",
    "content": "import { NO_OP } from '../components/NoOp';\nimport connectNodes from '../connectNodes';\nimport { createArrayWith } from './helpers';\n\nconst createStubAudioNode = () => ({\n    connect: jest.fn(),\n});\n\n/* we concat item[item.length - 1] if item\n * is an array to match the reducing nature\n * of the connectNodes function. */\nconst flatten = array => array.reduce(\n    (arr, item) => (\n        arr.concat(Array.isArray(item) ? item[item.length - 1] : item)\n    ), []);\n\ndescribe('connectNodes', () => {\n    it('should sequentially connect an array of audio nodes and return the last', () => {\n        const nodes = createArrayWith(5, createStubAudioNode);\n        const result = connectNodes(nodes);\n\n        expect(result).toBe(nodes[nodes.length - 1]);\n\n        nodes.reduce((previousNode, currentNode) => {\n            expect(previousNode.connect).toHaveBeenCalledTimes(1);\n            expect(previousNode.connect).toHaveBeenCalledWith(currentNode);\n            return currentNode;\n        });\n    });\n\n    it('should not connect NO_OP nodes', () => {\n        const noOpIndex = 3;\n\n        const nodes = createArrayWith(\n            5,\n            (_, i) => i === noOpIndex ? 
NO_OP : createStubAudioNode(),\n        );\n\n        connectNodes(nodes);\n\n        nodes.reduce((previousNode, currentNode) => {\n            if (currentNode === NO_OP) {\n                expect(previousNode.connect).not.toHaveBeenCalled();\n            } else if (previousNode !== NO_OP) {\n                expect(previousNode.connect).toHaveBeenCalledTimes(1);\n                expect(previousNode.connect).toHaveBeenCalledWith(currentNode);\n            }\n\n            return currentNode;\n        });\n    });\n\n    it('should reduce multidimensional arrays of AudioNodes', () => {\n        const nodes = [\n            ...createArrayWith(3, createStubAudioNode),\n            createArrayWith(4, createStubAudioNode),\n            ...createArrayWith(2, createStubAudioNode),\n        ];\n\n        connectNodes(nodes);\n\n        flatten(nodes).reduce((previousNode, currentNode) => {\n            expect(previousNode.connect).toHaveBeenCalledTimes(1);\n            expect(previousNode.connect).toHaveBeenCalledWith(currentNode);\n            return currentNode;\n        });\n    });\n});\n"
  },
  {
    "path": "src/__tests__/createAudioElement.test.js",
    "content": "import { createArrayWith } from './helpers';\nimport createAudioElement from '../createAudioElement';\n\nconst createElementCreator = node => {\n    const creator = jest.fn().mockReturnValue(node);\n    creator.isElementCreator = true;\n    return creator;\n};\n\nconst createAudioGraph = nodeTree =>\n    nodeTree.map(node =>\n        Array.isArray(node)\n            ? createElementCreator(createAudioGraph(node))\n            : createElementCreator(node)\n    );\n\n\nconst assertNestedAudioGraph = (graph, nodeTree, audioContext) => {\n    graph.forEach((creator, i) => {\n        if (Array.isArray(creator)) {\n            assertNestedAudioGraph(creator, nodeTree[i], audioContext);\n        } else {\n            expect(creator).toHaveBeenCalledTimes(1);\n            expect(creator).toHaveBeenCalledWith(audioContext, nodeTree[i]);\n        }\n    });\n};\n\ndescribe('createAudioElement', () => {\n    it('should conform to the JSX pragma signature and return a creator function', () => {\n        const node = {};\n        const audioContext = {};\n        const Component = jest.fn().mockReturnValue(node);\n        const props = { foo: 'bar', bar: 'baz' };\n        const children = [{}, {}, {}];\n        const creator = createAudioElement(Component, props, ...children);\n        const result = creator(audioContext);\n\n        expect(result).toBe(node);\n        expect(Component).toHaveBeenCalledTimes(1);\n        expect(Component).toHaveBeenCalledWith({\n            children,\n            audioContext,\n            ...props,\n        });\n    });\n\n    it('should render a creator when it is returned from a component', () => {\n        const innerNode = {};\n        const innerCreator = createElementCreator(innerNode);\n        const audioContext = {};\n        const Component = jest.fn().mockReturnValue(innerCreator);\n        const creator = createAudioElement(Component, {});\n        const result = creator(audioContext);\n\n        
expect(result).toBe(innerNode);\n        expect(innerCreator).toHaveBeenCalledTimes(1);\n        expect(innerCreator).toHaveBeenCalledWith(audioContext, undefined);\n    });\n\n    it('should invoke child creators when setting the parent`s `children` prop', () => {\n        const children = createArrayWith(10, (_, id) => {\n            const node = { id };\n            const creator = createElementCreator(node);\n            return { node, creator };\n        });\n\n        const audioContext = {};\n        const Component = jest.fn().mockReturnValue({});\n\n        const creator = createAudioElement(\n            Component,\n            {},\n            ...children.map(({ creator }) => creator),\n        );\n\n        creator(audioContext);\n\n        expect(Component).toHaveBeenCalledWith({\n            audioContext,\n            children: children.map(({ node }) => node),\n        });\n    });\n\n    it('should reconcile existing nodes for the graph from the provided array', () => {\n        const nodeTree = [\n            { id: 0 },\n            { id: 1 },\n            [\n                { id: 2 },\n                { id: 3 },\n                [\n                    { id: 4 },\n                    { id: 5 },\n                ],\n                { id: 6 },\n            ],\n            { id: 7 },\n            { id: 8 },\n        ];\n\n        const audioContext = { isAudioContext: true };\n        const graph = createAudioGraph(nodeTree);\n        const Component = jest.fn().mockReturnValue(nodeTree);\n\n        const audioGraphCreator = createAudioElement(\n            Component,\n            {},\n            ...graph,\n        );\n\n        audioGraphCreator(audioContext, nodeTree);\n        assertNestedAudioGraph(graph, nodeTree, audioContext);\n    });\n\n    it('should cache creator results', () => {\n        const Component = jest.fn().mockReturnValue({});\n\n        const creator = createAudioElement(\n            Component,\n            {},\n        );\n\n 
       creator({});\n        creator({});\n\n        expect(Component).toHaveBeenCalledTimes(1);\n    });\n});\n"
  },
  {
    "path": "src/__tests__/helpers.js",
    "content": "export const createStubAudioContext = (currentTime = 0) => ({\n    currentTime,\n    createGain() {\n        return {\n            gain: {\n                value: 0,\n            },\n        };\n    },\n    createStereoPanner() {\n        return {\n            pan: {\n                value: 0,\n            },\n        };\n    },\n    createBufferSource() {\n        return {\n            detune: {\n                value: 0,\n            },\n            playbackRate: {\n                value: 0,\n            },\n        };\n    },\n    createOscillator() {\n        return {\n            detune: {\n                value: 0,\n            },\n            frequency: {\n                value: 0,\n            },\n        };\n    }\n});\n\nexport const createArrayWith = (length, creator) =>\n    Array(length).fill(null).map(creator);\n"
  },
  {
    "path": "src/components/Aggregation.jsx",
    "content": "import createAudioElement from '../createAudioElement';\nimport NoOp from './NoOp';\nimport AudioGraph from './AudioGraph';\n\nconst Aggregation = ({ children }) => (\n    <AudioGraph>\n        {children}\n        <NoOp />\n    </AudioGraph>\n);\n\nexport default Aggregation;\n"
  },
  {
    "path": "src/components/AudioBufferSource.js",
    "content": "import asSourceNode from './asSourceNode';\nimport assignAudioParam from '../paramMutations/assignAudioParam';\n\nexport const createAudioBufferSource = assignParam =>\n    ({\n        audioContext,\n        buffer,\n        detune,\n        loop = false,\n        loopStart = 0,\n        loopEnd = 0,\n        playbackRate,\n        enqueue,\n        node = audioContext.createBufferSource(),\n    }) => {\n        node.buffer = buffer;\n        node.loop = loop;\n        node.loopStart = loopStart;\n        node.loopEnd = loopEnd;\n        assignParam(node.detune, detune, audioContext.currentTime);\n        assignParam(node.playbackRate, playbackRate, audioContext.currentTime);\n\n        enqueue(node);\n\n        return node;\n    };\n\nexport default asSourceNode(\n    createAudioBufferSource(\n        assignAudioParam\n    )\n);\n"
  },
  {
    "path": "src/components/AudioGraph.js",
    "content": "/* This does little right now,\n * but might be used in the future\n * to make optimisations and to\n * invoke other operations. */\n\nconst AudioGraph = ({ children }) => children;\n\nexport default AudioGraph;\n"
  },
  {
    "path": "src/components/ChannelMerger.js",
    "content": "const ChannelMerger = ({\n    audioContext,\n    inputs,\n    children,\n    node = audioContext.createChannelMerger(inputs),\n}) => {\n    const [setupConnections] = children;\n\n    const connectToChannel = (childNode, channel) => {\n        // assumption here that all nodes have 1 output. Extra param?\n        childNode.connect(node, 0, channel);\n    };\n\n    setupConnections(connectToChannel);\n\n    return node;\n};\n\nexport default ChannelMerger;\n"
  },
  {
    "path": "src/components/Destination.js",
    "content": "/* A convenience wrapper to expose\n * the audio context's destination\n * as a Web Audio X Component. */\n\nconst Destination = ({ audioContext }) => audioContext.destination;\n\nexport default Destination;\n"
  },
  {
    "path": "src/components/Gain.js",
    "content": "import assignAudioParam from '../paramMutations/assignAudioParam';\n\nexport const createGain = assignParam =>\n    ({\n        audioContext,\n        gain,\n        node = audioContext.createGain(),\n    }) => {\n        assignParam(node.gain, gain, audioContext.currentTime);\n        return node;\n    };\n\nexport default createGain(assignAudioParam);\n"
  },
  {
    "path": "src/components/NoOp.js",
    "content": "/* An internal, workaround component\n * to inform the node connector that\n * no connection is required at this\n * point in the current subtree.\n * Used by Aggregation. */\n\nexport const NO_OP = 'NO_OP';\n\nconst NoOp = () => NO_OP;\n\nexport default NoOp;\n"
  },
  {
    "path": "src/components/Oscillator.js",
    "content": "import asSourceNode from './asSourceNode';\nimport assignAudioParam from '../paramMutations/assignAudioParam';\n\nexport const createOscillator = assignParam =>\n    ({\n        audioContext,\n        detune = 0,\n        frequency,\n        type,\n        onended,\n        enqueue,\n        node = audioContext.createOscillator(),\n    }) => {\n        assignParam(node.detune, detune, audioContext.currentTime);\n        assignParam(node.frequency, frequency, audioContext.currentTime);\n        node.type = type;\n        node.onended = onended;\n\n        enqueue(node);\n\n        return node;\n    };\n\nexport default asSourceNode(\n    createOscillator(assignAudioParam)\n);\n"
  },
  {
    "path": "src/components/StereoPanner.js",
    "content": "import assignAudioParam from '../paramMutations/assignAudioParam';\n\nexport const createStereoPanner = assignParam =>\n    ({\n        audioContext,\n        pan,\n        node = audioContext.createStereoPanner(),\n    }) => {\n        assignParam(node.pan, pan, audioContext.currentTime);\n        return node;\n    };\n\nexport default createStereoPanner(assignAudioParam);\n"
  },
  {
    "path": "src/components/__tests__/AudioBufferSource.test.js",
    "content": "\nimport { createAudioBufferSource } from '../AudioBufferSource';\nimport { createStubAudioContext } from '../../__tests__/helpers';\n\ndescribe('AudioBufferSource', () => {\n    let AudioBufferSource;\n    let assignAudioParam;\n\n    beforeEach(() => {\n        assignAudioParam = jest.fn().mockImplementationOnce((param, value) => param.value = value);\n        AudioBufferSource = createAudioBufferSource(assignAudioParam);\n    });\n\n    it('should create an AudioBufferSourceNode, assign its props, enqueue, and return said GainNode', () => {\n        const audioContext = createStubAudioContext();\n        const buffer = {};\n        const loop = true;\n        const loopStart = 1;\n        const loopEnd = 3;\n        const detune = 1;\n        const playbackRate = 1;\n        const enqueue = jest.fn();\n\n        const node = AudioBufferSource({\n            audioContext,\n            buffer,\n            loop,\n            loopStart,\n            loopEnd,\n            detune,\n            playbackRate,\n            enqueue,\n        });\n\n        expect(node.buffer).toBe(buffer);\n        expect(node.loop).toEqual(true);\n        expect(node.loopStart).toEqual(1);\n        expect(node.loopEnd).toEqual(3);\n\n        expect(assignAudioParam).toHaveBeenCalledTimes(2);\n        expect(assignAudioParam).toHaveBeenCalledWith(node.detune, detune, audioContext.currentTime);\n        expect(assignAudioParam).toHaveBeenCalledWith(node.playbackRate, playbackRate, audioContext.currentTime);\n        expect(enqueue).toHaveBeenCalledTimes(1);\n        expect(enqueue).toHaveBeenCalledWith(node);\n    });\n\n    it('should respect and mutate an existing node if provided', () => {\n        const audioContext = createStubAudioContext();\n        const node = audioContext.createBufferSource();\n        const buffer = {};\n\n        const result = AudioBufferSource({\n            audioContext,\n            buffer,\n            node,\n            enqueue: 
jest.fn(),\n        });\n\n        expect(node).toBe(result);\n        expect(node.buffer).toBe(buffer);\n    });\n});\n"
  },
  {
    "path": "src/components/__tests__/Gain.test.js",
    "content": "import { createGain } from '../Gain';\nimport { createStubAudioContext } from '../../__tests__/helpers';\n\ndescribe('Gain', () => {\n    let Gain;\n    let assignAudioParam;\n\n    beforeEach(() => {\n        assignAudioParam = jest.fn().mockImplementation((param, value) => param.value = value);\n        Gain = createGain(assignAudioParam);\n    });\n\n    it('should create a GainNode, assign its gain, and return said GainNode', () => {\n        const audioContext = createStubAudioContext();\n        const gain = 0.4;\n        const node = Gain({ audioContext, gain });\n\n        expect(assignAudioParam).toHaveBeenCalledTimes(1);\n        expect(assignAudioParam).toHaveBeenCalledWith(node.gain, gain, audioContext.currentTime);\n        expect(node.gain.value).toEqual(gain);\n    });\n\n    it('should mutate an existing GainNode when provided', () => {\n        const audioContext = createStubAudioContext();\n        const node = audioContext.createGain();\n        const gain = 0.7;\n        const result = Gain({ audioContext, gain, node });\n\n        expect(result).toBe(node);\n        expect(assignAudioParam).toHaveBeenCalledTimes(1);\n        expect(assignAudioParam).toHaveBeenCalledWith(node.gain, gain, audioContext.currentTime);\n        expect(node.gain.value).toEqual(gain);\n    });\n});\n"
  },
  {
    "path": "src/components/__tests__/Oscillator.test.js",
    "content": "import { createOscillator } from '../Oscillator';\nimport { createStubAudioContext } from '../../__tests__/helpers';\n\ndescribe('Oscillator', () => {\n    let assignAudioParam;\n    let Oscillator;\n\n    beforeEach(() => {\n        assignAudioParam = jest.fn().mockImplementation((param, value) => param.value = value);\n        Oscillator = createOscillator(assignAudioParam);\n    });\n\n    it('should create an OscillatorNode, assign its props, and return said OscillatorNode', () => {\n        const audioContext = createStubAudioContext();\n        const detune = 7;\n        const frequency = 300;\n        const type = 'square';\n        const onended = jest.fn();\n        const enqueue = jest.fn();\n\n        const node = Oscillator({\n            audioContext,\n            detune,\n            frequency,\n            type,\n            onended,\n            enqueue,\n        });\n\n        expect(assignAudioParam).toHaveBeenCalledTimes(2);\n        expect(assignAudioParam).toHaveBeenCalledWith(node.detune, detune, audioContext.currentTime);\n        expect(assignAudioParam).toHaveBeenCalledWith(node.frequency, frequency, audioContext.currentTime);\n        expect(enqueue).toHaveBeenCalledTimes(1);\n        expect(enqueue).toHaveBeenCalledWith(node);\n        expect(node.frequency.value).toEqual(frequency);\n        expect(node.detune.value).toEqual(detune);\n        expect(node.onended).toBe(onended);\n    });\n\n    it('should mutate an existing OscillatorNode when provided', () => {\n        const audioContext = createStubAudioContext();\n        const node = audioContext.createOscillator();\n        const frequency = 20;\n        const enqueue = jest.fn();\n\n        const result = Oscillator({\n            audioContext,\n            frequency,\n            enqueue,\n            node,\n        });\n\n        expect(result).toBe(node);\n        expect(result.frequency.value).toEqual(20);\n    });\n});\n"
  },
  {
    "path": "src/components/__tests__/StereoPanner.test.js",
    "content": "import { createStereoPanner } from '../StereoPanner';\nimport { createStubAudioContext } from '../../__tests__/helpers';\n\ndescribe('StereoPanner', () => {\n    let assignAudioParam;\n    let StereoPanner;\n\n    beforeEach(() => {\n        assignAudioParam = jest.fn().mockImplementationOnce((param, value) => param.value = value);\n        StereoPanner = createStereoPanner(assignAudioParam);\n    });\n\n    afterEach(() => {\n        assignAudioParam.mockReset();\n    });\n\n    it('should create a StereoPannerNode, assign its pan, and return said StereoPannerNode', () => {\n        const audioContext = createStubAudioContext();\n        const pan = 0.4;\n        const node = StereoPanner({ audioContext, pan });\n\n        expect(assignAudioParam).toHaveBeenCalledTimes(1);\n        expect(assignAudioParam).toHaveBeenCalledWith(node.pan, pan, audioContext.currentTime);\n        expect(node.pan.value).toEqual(pan);\n    });\n\n    it('should mutate an existing StereoPannerNode when provided', () => {\n        const audioContext = createStubAudioContext();\n        const node = audioContext.createStereoPanner();\n        const pan = 0.7;\n        const result = StereoPanner({ audioContext, pan, node });\n\n        expect(result).toBe(node);\n        expect(assignAudioParam).toHaveBeenCalledTimes(1);\n        expect(assignAudioParam).toHaveBeenCalledWith(node.pan, pan, audioContext.currentTime);\n        expect(node.pan.value).toEqual(pan);\n    });\n});\n"
  },
  {
    "path": "src/components/__tests__/asSourceNode.test.jsx",
    "content": "import { createAsSourceNode } from '../asSourceNode';\nimport { createStubAudioContext } from '../../__tests__/helpers';\n\nconst createStubSourceNode = () => ({\n    start: jest.fn(),\n    stop: jest.fn(),\n});\n\ndescribe('asSourceNode HOC', () => {\n    // used by both asSourceNode and JSX below\n    const createAudioElement = jest.fn().mockImplementation(\n        (Component, props, ...children) =>\n            Component({\n                children,\n                ...props,\n            })\n    );\n\n    let asSourceNode;\n\n    beforeEach(() => {\n        asSourceNode = createAsSourceNode(createAudioElement);\n    });\n\n    it('should create a new component that proxies incoming props and provides an enqueue prop', () => {\n        const MyComponent = props => ({ props }); // TODO: common, reusable pattern across tests?\n        const SourceComponent = asSourceNode(MyComponent);\n        const sourceElement = <SourceComponent foo=\"bar\" />;\n\n        expect(sourceElement.props.foo).toEqual('bar');\n        expect(sourceElement.props.enqueue).toBeDefined();\n    });\n\n    it('should schedule playback of the source node based upon the startTime prop and context`s current time', () => {\n        const audioContext = createStubAudioContext(3);\n        const audioNode = createStubSourceNode();\n        const MyComponent = ({ enqueue, node }) => enqueue(node);\n        const SourceComponent = asSourceNode(MyComponent);\n\n        <SourceComponent\n            startTime={1}\n            audioContext={audioContext}\n            node={audioNode}\n        />;\n\n        expect(audioNode.start).toHaveBeenCalledTimes(1);\n        expect(audioNode.start).toHaveBeenCalledWith(4);\n        expect(audioNode.stop).not.toHaveBeenCalled();\n    });\n\n    it('should schedule the stopping of the source node when endTime is provided', () => {\n        const audioContext = createStubAudioContext(3);\n        const audioNode = createStubSourceNode();\n        
const MyComponent = ({ enqueue, node }) => enqueue(node);\n        const SourceComponent = asSourceNode(MyComponent);\n\n        <SourceComponent\n            startTime={1}\n            endTime={5}\n            audioContext={audioContext}\n            node={audioNode}\n        />;\n\n        expect(audioNode.start).toHaveBeenCalledTimes(1);\n        expect(audioNode.start).toHaveBeenCalledWith(4);\n        expect(audioNode.stop).toHaveBeenCalledTimes(1);\n        expect(audioNode.stop).toHaveBeenCalledWith(8);\n    });\n\n    it('should not schedule node playback or stopping when it has already been scheduled', () => {\n        const audioContext = createStubAudioContext(3);\n        const audioNode = createStubSourceNode();\n        const MyComponent = ({ enqueue, node }) => enqueue(node);\n        const SourceComponent = asSourceNode(MyComponent);\n\n        audioNode.isScheduled = true;\n\n        <SourceComponent\n            startTime={1}\n            endTime={5}\n            audioContext={audioContext}\n            node={audioNode}\n        />;\n\n        expect(audioNode.start).not.toHaveBeenCalled();\n        expect(audioNode.stop).not.toHaveBeenCalled();\n    });\n});\n"
  },
  {
    "path": "src/components/asSourceNode.jsx",
    "content": "/* A higher-order component that\n * abstracts and centralises logic\n * for enqueuing the starting and\n * stopping of source nodes */\n\nimport elementCreator from '../createAudioElement';\n\nconst createEnqueuer = ({ audioContext, startTime = 0, endTime }) =>\n    node => {\n        if (node.isScheduled) {\n            return;\n        }\n\n        node.start(audioContext.currentTime + startTime);\n\n        if (endTime) {\n            node.stop(audioContext.currentTime + endTime);\n        }\n\n        node.isScheduled = true;\n    };\n\n/* thunk to create HOC with cAE\n * as injectable dependency */\nexport const createAsSourceNode = createAudioElement =>\n    Component =>\n        props =>\n            <Component\n                enqueue={createEnqueuer(props)}\n                {...props}\n            />;\n\nexport default createAsSourceNode(elementCreator);\n"
  },
  {
    "path": "src/components/index.js",
    "content": "export { default as Aggregation } from './Aggregation';\nexport { default as AudioGraph } from './AudioGraph';\nexport { default as AudioBufferSource } from './AudioBufferSource';\nexport { default as ChannelMerger } from './ChannelMerger';\nexport { default as Gain } from './Gain';\nexport { default as Oscillator } from './Oscillator';\nexport { default as StereoPanner } from './StereoPanner';\nexport { default as Destination } from './Destination';\n"
  },
  {
    "path": "src/connectNodes.js",
    "content": "import { NO_OP } from './components/NoOp';\n\nconst connectNodes = nodes =>\n    nodes.reduce((sourceNode, targetNode) => {\n        const source = Array.isArray(sourceNode)\n            ? connectNodes(sourceNode)\n            : sourceNode;\n\n        const target = Array.isArray(targetNode)\n            ? connectNodes(targetNode)\n            : targetNode;\n\n        if (source !== NO_OP && target !== NO_OP) {\n            source.connect(target);\n        }\n\n        return target;\n    });\n\nexport default connectNodes;\n"
  },
  {
    "path": "src/createAudioElement.js",
    "content": "/* I chose \"cache\" over \"memoise\" here,\n * as we don't cache by the inner\n * arguments. We just want to avoid\n * recomputing the node for a certain\n * creator func reference. */\nconst cache = func => {\n    let result;\n\n    return (...args) => {\n        if (!result) {\n            result = func(...args);\n        }\n\n        return result;\n    };\n};\n\n/* decoration is required to differentiate\n * between element creators and other function\n * children e.g. render props */\nconst asCachedCreator = creator => {\n    const cachedCreator = cache(creator);\n    cachedCreator.isElementCreator = true;\n    return cachedCreator;\n};\n\nconst getNodeFromTree = tree =>\n    !Array.isArray(tree)\n        ? tree\n        : undefined; // facilitates with default prop in destructuring\n\nconst createAudioElement = (Component, props, ...children) =>\n    asCachedCreator((audioContext, nodeTree = []) => {\n        const mapResult = (result, i) =>\n            result.isElementCreator\n                ? result(audioContext, nodeTree[i])\n                : result;\n\n        /* we want to render children first so the nodes\n         * can be directly consumed by their parents */\n        const createChildren = children => children.map(mapResult);\n        const existingNode = getNodeFromTree(nodeTree);\n\n        return mapResult(\n            Component({\n                children: createChildren(children),\n                audioContext,\n                node: existingNode,\n                ...props,\n            })\n        );\n    });\n\nexport default createAudioElement;\n"
  },
  {
    "path": "src/index.js",
    "content": "export { default as createAudioElement } from './createAudioElement';\nexport * from './renderAudioGraph';\nexport { default as isWaxComponent } from './isWaxComponent';\nexport * from './components';\nexport * from './paramMutations';\nexport { default as assignAudioParam } from './paramMutations/assignAudioParam';\n"
  },
  {
    "path": "src/isWaxComponent.js",
    "content": "import * as components from './components';\n\nconst componentsArray = Object.values(components);\nconst isWaxComponent = Component => componentsArray.includes(Component);\n\nexport default isWaxComponent;\n"
  },
  {
    "path": "src/paramMutations/__tests__/assignAudioParam.test.js",
    "content": "import assignAudioParam from '../assignAudioParam';\n\ndescribe('assignAudioParam', () => {\n    it('should do nothing if a value is not provided', () => {\n        const param = {};\n        assignAudioParam(param, null, 1);\n        expect(param).toEqual({});\n    });\n\n    it('should invoke the value with the param and current time when it`s a function', () => {\n        const param = {};\n        const value = jest.fn();\n        const currentTime = 6;\n\n        assignAudioParam(param, value, currentTime);\n\n        expect(value).toHaveBeenCalledTimes(1);\n        expect(value).toHaveBeenCalledWith(param, currentTime);\n    });\n\n    it('should invoke each value with the param and currenttime when it`s an array of functions', () => {\n        const param = {};\n        const value = [jest.fn(), jest.fn(), jest.fn()];\n        const currentTime = 9;\n\n        assignAudioParam(param, value, currentTime);\n\n        value.forEach(mutator => {\n            expect(mutator).toHaveBeenCalledTimes(1);\n            expect(mutator).toHaveBeenCalledWith(param, currentTime);\n        });\n    });\n\n    it('should assign the value to the param`s value property if it is not a function', () => {\n        const param = {};\n        const value = 5;\n\n        assignAudioParam(param, value);\n\n        expect(param.value).toEqual(value);\n    });\n});\n"
  },
  {
    "path": "src/paramMutations/__tests__/createParamMutator.test.js",
    "content": "import createParamMutator from '../createParamMutator';\n\ndescribe('createParamMutator', () => {\n    it('should create a func for public use which returns an inner function to manipulate AudioParams', () => {\n        const audioParam = { setValueAtTime: jest.fn() };\n        const setValueAtTime = createParamMutator('setValueAtTime');\n        const mutateAudioParam = setValueAtTime(300, 7);\n\n        mutateAudioParam(audioParam, 3);\n\n        expect(audioParam.setValueAtTime).toHaveBeenCalledTimes(1);\n        expect(audioParam.setValueAtTime).toHaveBeenCalledWith(300, 10);\n    });\n});\n"
  },
  {
    "path": "src/paramMutations/assignAudioParam.js",
    "content": "const isMutation = value => typeof value === 'function';\n\nconst isMutationSequence = value =>\n    Array.isArray(value) && value.every(isMutation);\n\nconst assignAudioParam = (param, value, currentTime) => {\n    if (!value) {\n        return;\n    }\n\n    if (isMutation(value)) {\n        value(param, currentTime);\n    } else if (isMutationSequence(value)) {\n        value.forEach(paramMutation => paramMutation(param, currentTime));\n    } else {\n        param.value = value;\n    }\n};\n\nexport default assignAudioParam;\n"
  },
  {
    "path": "src/paramMutations/createParamMutator.js",
    "content": "/* Since AudioParam methods follow a\n * consistent signature, this function\n * allows one to trivially create functions\n * that will be consumed by users and\n * components to schedule value changes. */\n\nconst createParamMutator = name =>\n    (value, time) =>\n        (param, currentTime) => {\n            param[name](value, currentTime + time);\n        };\n\nexport default createParamMutator;\n"
  },
  {
    "path": "src/paramMutations/index.js",
    "content": "import createParamMutator from './createParamMutator';\n\nexport const setValueAtTime = createParamMutator('setValueAtTime');\nexport const linearRampToValueAtTime = createParamMutator('linearRampToValueAtTime');\nexport const exponentialRampToValueAtTime = createParamMutator('exponentialRampToValueAtTime');\nexport const setTargetAtTime = createParamMutator('setTargetAtTime');\nexport const setValueCurveAtTime = createParamMutator('setValueCurveAtTime');\n"
  },
  {
    "path": "src/renderAudioGraph.js",
    "content": "import connectNodes from './connectNodes';\n\nexport const renderAudioGraph = (\n    createGraphElement,\n    context = new AudioContext(),\n) => {\n    const nodes = createGraphElement(context);\n    connectNodes(nodes);\n    return nodes;\n};\n\nexport const renderPersistentAudioGraph = (\n    createGraphElement,\n    context = new AudioContext(),\n) => {\n    let nodes = renderAudioGraph(createGraphElement, context);\n\n    return createNewGraphElement => {\n        nodes = createNewGraphElement(context, nodes);\n    };\n};\n"
  }
]