[
  {
    "path": ".editorconfig",
    "content": "root = true\n\n[*]\nend_of_line = lf\ninsert_final_newline = true\nindent_style = tab\nindent_size = 4\ntrim_trailing_whitespace = true\n\n[*.yml]\nindent_style = space\nindent_size = 2\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\non:\n  push:\n  workflow_dispatch:\n  schedule:\n    - cron: 0 20 * * *\n\nenv:\n  FORCE_COLOR: \"1\"\n\njobs:\n  check:\n    strategy:\n      fail-fast: false\n      matrix:\n        os: [ubuntu-latest, macos-latest, windows-latest, macos-15-intel]\n    runs-on: ${{ matrix.os }}\n    steps:\n      - uses: actions/checkout@v4\n      - uses: laytan/setup-odin@v2\n        with:\n          release: nightly\n      - name: Report\n        run: odin report\n      - name: Run client example\n        run: odin run examples/client\n        timeout-minutes: 1\n      - name: Odin check\n        if: success() || failure()\n        run: odin check examples/complete -vet --strict-style && odin check examples/client -vet --strict-style\n        timeout-minutes: 1\n"
  },
  {
    "path": ".github/workflows/docs.yml",
    "content": "name: Deploy docs to GitHub pages\n\non:\n  push:\n    branches: [main]\n  workflow_dispatch:\n\nenv:\n  FORCE_COLOR: \"1\"\n\npermissions:\n  contents: read\n  pages: write\n  id-token: write\n\nconcurrency:\n  group: \"pages\"\n  cancel-in-progress: true\n\njobs:\n  docs:\n    environment:\n      name: github-pages\n      url: ${{ steps.deployment.outputs.page_url }}\n    runs-on: ubuntu-latest\n    steps:\n      - uses: laytan/setup-odin@v2\n        with:\n          release: nightly\n      - name: Report\n        run: odin report\n      - name: Get commonmark\n        run: sudo apt-get install libcmark-dev\n      - name: Get and build Odin docs generator\n        run: |\n          cd /home/runner\n          git clone https://github.com/odin-lang/pkg.odin-lang.org odin-doc\n          cd odin-doc\n          # The /home/runner/odin directory is in the PATH so output it there.\n          odin build . -out:/home/runner/odin/odin-doc\n          cd /home/runner\n      - uses: actions/checkout@v4\n      - name: Generate documentation\n        run: ./docs/generate.sh\n      - uses: actions/configure-pages@v3\n      - uses: actions/upload-pages-artifact@v3\n        with:\n          path: ./docs/build\n      - uses: actions/deploy-pages@v4\n        id: deployment\n"
  },
  {
    "path": ".github/workflows/openssl.yml",
    "content": "name: OpenSSL\non:\n  push:\n    paths: [\".github/workflows/openssl.yml\"]\n    branches: [\"main\"]\n  workflow_dispatch:\n  schedule:\n    - cron: 0 20 * * *\n\nenv:\n  FORCE_COLOR: \"1\"\n\nconcurrency:\n  group: \"openssl\"\n  cancel-in-progress: true\n\njobs:\n  check-updates:\n    runs-on: windows-latest\n    steps:\n      - uses: actions/checkout@v4\n\n      - id: current-release\n        shell: bash\n        run: |\n          VERSION=$(cat openssl/.version)\n          echo \"version=$VERSION\" >> $GITHUB_OUTPUT\n          echo \"current version is $VERSION\"\n\n      - uses: actions/github-script@v7\n        id: latest-release\n        with:\n          script: |\n            const latestRelease = await github.rest.repos.getLatestRelease({\n              owner: 'openssl',\n              repo: 'openssl',\n            });\n            core.setOutput('version', latestRelease.data.tag_name);\n\n            const asset = latestRelease.data.assets.find(asset => asset.name.endsWith('.tar.gz'));\n            if (asset) {\n              core.setOutput('url', asset.browser_download_url);\n              core.setOutput('version', latestRelease.data.tag_name);\n              core.info('latest version is ' + latestRelease.data.tag_name);\n            } else {\n              core.setFailed('No .tar.gz asset found in the latest release.');\n            }\n\n      - name: update .version\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n        shell: bash\n        run: |\n          echo \"${{ steps.latest-release.outputs.version }}\" > openssl/.version\n\n      - uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n\n      - uses: ilammy/setup-nasm@13cbeb366c45c4379d3478cdcbadd8295feb5028\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n\n      - 
name: download release\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n        shell: bash\n        run: |\n          curl -L -o openssl.tar.gz ${{ steps.latest-release.outputs.url }}\n          file openssl.tar.gz\n\n      - name: unzip release\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n        shell: bash\n        run: |\n          tar -xzf openssl.tar.gz\n\n      - name: configure\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n        run: |\n          cd ${{ steps.latest-release.outputs.version }}\n          perl Configure VC-WIN64A-HYBRIDCRT no-legacy no-deprecated no-tls-deprecated-ec no-quic no-uplink --release --api=3.0\n\n      - name: compile\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n        run: |\n          cd ${{ steps.latest-release.outputs.version }}\n          nmake\n\n      - name: test\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n        run: |\n          cd ${{ steps.latest-release.outputs.version }}\n          nmake test\n\n      - name: copy & clean\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n        shell: bash\n        run: |\n          rm -rf openssl/includes/windows/*\n\n          cd ${{ steps.latest-release.outputs.version }}\n          dir\n          cp libcrypto.lib ../openssl/includes/windows\n          cp libssl.lib ../openssl/includes/windows\n          cp libcrypto_static.lib ../openssl/includes/windows\n          cp libssl_static.lib ../openssl/includes/windows\n\n          cd ..\n          rm -rf openssl.tar.gz\n          rm -rf ${{ steps.latest-release.outputs.version }}\n\n      - name: pr\n        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}\n        uses: 
peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c\n        with:\n          title: |\n            Update bundled OpenSSL libraries to ${{ steps.latest-release.outputs.version }}\n          commit-message: |\n            openssl: update bundled libraries to ${{ steps.latest-release.outputs.version }}\n"
  },
  {
    "path": ".gitignore",
    "content": "*.bin\nols.json\nopm\nTaskfile.yml\n*.exe\ndocs/build\n\n# Example binaries.\nminimal\ncomplete\nreadme\nrouting\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2023 Laytan Laats\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# Odin HTTP\n\nA HTTP/1.1 implementation for Odin purely written in Odin (besides SSL).\n\nSee generated package documentation at [odin-http.laytan.dev](https://odin-http.laytan.dev).\n\nSee below examples or the examples directory.\n\n## Compatibility\n\nThis is beta software, confirmed to work in my own use cases but can certainly contain edge cases and bugs that I did not catch.\nPlease file issues for any bug or suggestion you encounter/have.\n\nI am usually on a recent master version of Odin and commits will be made with new features if applicable, backwards compatibility or even\nstable version compatibility is not currently a thing.\n\nBecause this is still heavily in development, I do not hesitate to push API changes at the moment, so beware.\n\nThe package has been tested to work with Ubuntu Linux (other \"normal\" distros should work), MacOS (m1 and intel), and Windows 64 bit.\nAny other distributions or versions have not been tested and might not work.\n\n## Dependencies\n\nThe *client* package depends on OpenSSL for making HTTPS requests.\n\nThis repository contains a copy of these libraries for ease of use on Windows.\n\nFor Linux, most distros come with OpenSSL, if not you can install it with a package manager, usually under `libssl3`.\n\n## Performance\n\nSome small benchmarks have been done in the comparisons directory.\n\nMy main priority in terms of performance is currently Linux (because most servers end up there in production).\n\nOther targets are still made to be performant, but benchmarking etc. 
is mostly done on Linux.\n\n## IO implementations\n\nAlthough these implementation details are not exposed when using the package, these are the underlying kernel API's that are used.\n\n- Windows: [IOCP (IO Completion Ports)](https://en.wikipedia.org/wiki/Input/output_completion_port)\n- Linux:   [io_uring](https://en.wikipedia.org/wiki/Io_uring)\n- Darwin:  [KQueue](https://en.wikipedia.org/wiki/Kqueue)\n\nThe IO part of this package can be used on its own for other types of applications, see the nbio directory for the documentation on that.\nIt has APIs for reading, writing, opening, closing, seeking files and accepting, connecting, sending, receiving and closing sockets, both UDP and TCP, fully cross-platform.\n\n## Server example\n\n```odin\npackage main\n\nimport \"core:fmt\"\nimport \"core:log\"\nimport \"core:net\"\nimport \"core:time\"\n\nimport http \"../..\" // Change to path of package.\n\nmain :: proc() {\n\tcontext.logger = log.create_console_logger(.Info)\n\n\ts: http.Server\n\t// Register a graceful shutdown when the program receives a SIGINT signal.\n\thttp.server_shutdown_on_interrupt(&s)\n\n\t// Set up routing\n\trouter: http.Router\n\thttp.router_init(&router)\n\tdefer http.router_destroy(&router)\n\n\t// Routes are tried in order.\n\t// Route matching is implemented using an implementation of Lua patterns, see the docs on them here:\n\t// https://www.lua.org/pil/20.2.html\n\t// They are very similar to regex patterns but a bit more limited, which makes them much easier to implement since Odin does not have a regex implementation.\n\n\t// Matches /users followed by any word (alphanumeric) followed by /comments and then / with any number.\n\t// The word is available as req.url_params[0], and the number as req.url_params[1].\n\thttp.route_get(&router, \"/users/(%w+)/comments/(%d+)\", http.handler(proc(req: ^http.Request, res: ^http.Response) {\n\t\thttp.respond_plain(res, fmt.tprintf(\"user %s, comment: %s\", req.url_params[0], 
req.url_params[1]))\n\t}))\n\thttp.route_get(&router, \"/cookies\", http.handler(cookies))\n\thttp.route_get(&router, \"/api\", http.handler(api))\n\thttp.route_get(&router, \"/ping\", http.handler(ping))\n\thttp.route_get(&router, \"/index\", http.handler(index))\n\n\t// Matches every get request that did not match another route.\n\thttp.route_get(&router, \"(.*)\", http.handler(static))\n\n\thttp.route_post(&router, \"/ping\", http.handler(post_ping))\n\n\trouted := http.router_handler(&router)\n\n\tlog.info(\"Listening on http://localhost:6969\")\n\n\terr := http.listen_and_serve(&s, routed, net.Endpoint{address = net.IP4_Loopback, port = 6969})\n\tfmt.assertf(err == nil, \"server stopped with error: %v\", err)\n}\n\ncookies :: proc(req: ^http.Request, res: ^http.Response) {\n\tappend(\n\t\t&res.cookies,\n\t\thttp.Cookie{\n\t\t\tname         = \"Session\",\n\t\t\tvalue        = \"123\",\n\t\t\texpires_gmt  = time.now(),\n\t\t\tmax_age_secs = 10,\n\t\t\thttp_only    = true,\n\t\t\tsame_site    = .Lax,\n\t\t},\n\t)\n\thttp.respond_plain(res, \"Yo!\")\n}\n\napi :: proc(req: ^http.Request, res: ^http.Response) {\n\tif err := http.respond_json(res, req.line); err != nil {\n\t\tlog.errorf(\"could not respond with JSON: %s\", err)\n\t}\n}\n\nping :: proc(req: ^http.Request, res: ^http.Response) {\n\thttp.respond_plain(res, \"pong\")\n}\n\nindex :: proc(req: ^http.Request, res: ^http.Response) {\n\thttp.respond_file(res, \"examples/complete/static/index.html\")\n}\n\nstatic :: proc(req: ^http.Request, res: ^http.Response) {\n\thttp.respond_dir(res, \"/\", \"examples/complete/static\", req.url_params[0])\n}\n\npost_ping :: proc(req: ^http.Request, res: ^http.Response) {\n\thttp.body(req, len(\"ping\"), res, proc(res: rawptr, body: http.Body, err: http.Body_Error) {\n\t\tres := cast(^http.Response)res\n\n\t\tif err != nil {\n\t\t\thttp.respond(res, http.body_error_status(err))\n\t\t\treturn\n\t\t}\n\n\t\tif body != \"ping\" {\n\t\t\thttp.respond(res, 
http.Status.Unprocessable_Content)\n\t\t\treturn\n\t\t}\n\n\t\thttp.respond_plain(res, \"pong\")\n\t})\n}\n```\n\n## Client example\n\n```odin\npackage main\n\nimport \"core:fmt\"\n\nimport \"../../client\"\n\nmain :: proc() {\n\tget()\n\tpost()\n}\n\n// basic get request.\nget :: proc() {\n\tres, err := client.get(\"https://www.google.com/\")\n\tif err != nil {\n\t\tfmt.printf(\"Request failed: %s\", err)\n\t\treturn\n\t}\n\tdefer client.response_destroy(&res)\n\n\tfmt.printf(\"Status: %s\\n\", res.status)\n\tfmt.printf(\"Headers: %v\\n\", res.headers)\n\tfmt.printf(\"Cookies: %v\\n\", res.cookies)\n\tbody, allocation, berr := client.response_body(&res)\n\tif berr != nil {\n\t\tfmt.printf(\"Error retrieving response body: %s\", berr)\n\t\treturn\n\t}\n\tdefer client.body_destroy(body, allocation)\n\n\tfmt.println(body)\n}\n\nPost_Body :: struct {\n\tname:    string,\n\tmessage: string,\n}\n\n// POST request with JSON.\npost :: proc() {\n\treq: client.Request\n\tclient.request_init(&req, .Post)\n\tdefer client.request_destroy(&req)\n\n\tpbody := Post_Body{\"Laytan\", \"Hello, World!\"}\n\tif err := client.with_json(&req, pbody); err != nil {\n\t\tfmt.printf(\"JSON error: %s\", err)\n\t\treturn\n\t}\n\n\tres, err := client.request(&req, \"https://webhook.site/YOUR-ID-HERE\")\n\tif err != nil {\n\t\tfmt.printf(\"Request failed: %s\", err)\n\t\treturn\n\t}\n\tdefer client.response_destroy(&res)\n\n\tfmt.printf(\"Status: %s\\n\", res.status)\n\tfmt.printf(\"Headers: %v\\n\", res.headers)\n\tfmt.printf(\"Cookies: %v\\n\", res.cookies)\n\n\tbody, allocation, berr := client.response_body(&res)\n\tif berr != nil {\n\t\tfmt.printf(\"Error retrieving response body: %s\", berr)\n\t\treturn\n\t}\n\tdefer client.body_destroy(body, allocation)\n\n\tfmt.println(body)\n}\n```\n"
  },
  {
    "path": "allocator.odin",
    "content": "#+private\n#+build ignore\npackage http\n\n// NOTE: currently not in use, had a strange crash I can't figure out.\n\nimport \"core:container/queue\"\nimport \"core:log\"\nimport \"core:mem\"\n\n// Defaults, reassigned when server is set up.\ninitial_block_cap      := mem.Kilobyte * 256\nmax_free_blocks_queued := 64\n\n// A lean, growing, block based allocator.\n//\n// The first block is kept around after a `free_all` and only free'd using `allocator_destroy`,\n// so it doesn't have to allocate it each time.\n//\n// Blocks start at the `initial_block_cap` (configurable) size and double in size after each new block.\n//\n// The last allocation is saved and can be freed with `free_with_size` or resized without\n// taking up a whole new region in the block.\nAllocator :: struct {\n\tparent:     mem.Allocator,\n\tcurr:       ^Block,\n\tcap:        int,\n\tlast_alloc: rawptr,\n}\n\nBlock :: struct {\n\tprev:       Maybe(^Block),\n\tsize:       int,\n\ttotal_size: int,\n\toffset:     int,\n\tdata:       [0]byte,\n}\n\nallocator :: proc(a: ^Allocator) -> mem.Allocator {\n\treturn {\n\t\tprocedure = allocator_proc,\n\t\tdata      = a,\n\t}\n}\n\nallocator_init :: proc(a: ^Allocator, parent := context.allocator, loc := #caller_location) -> mem.Allocator_Error {\n\ta.parent = parent\n\ta.cap = initial_block_cap\n\ta.curr = allocator_new_block(a, 0, 0, loc) or_return\n\treturn nil\n}\n\nallocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,\n                             size, alignment: int,\n                             old_memory: rawptr, old_size: int,\n                             loc := #caller_location) -> (bytes: []byte, err: mem.Allocator_Error) {\n\n\ta := (^Allocator)(allocator_data)\n\tswitch mode {\n\tcase .Alloc:\n\t\treturn allocator_alloc_zerod(a, size, alignment, loc)\n\n\tcase .Alloc_Non_Zeroed:\n\t\treturn allocator_alloc_non_zerod(a, size, alignment, loc)\n\n\tcase .Free:\n\t\t// We can only free if this was the last 
allocation done.\n\t\tif old_memory == a.last_alloc {\n\t\t\ta.curr.offset -= old_size\n\t\t\ta.last_alloc = nil\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn nil, .Mode_Not_Implemented\n\n\tcase .Free_All:\n\t\tallocator_free_all(a, loc)\n\t\treturn\n\n\tcase .Resize, .Resize_Non_Zeroed:\n\t\t// Shrink, if it was the last alloc also decrease from block offset.\n\t\tif old_size >= size {\n\t\t\tif a.last_alloc == old_memory {\n\t\t\t\ta.curr.offset -= old_size - size\n\t\t\t}\n\n\t\t\treturn mem.byte_slice(old_memory, size), nil\n\t\t}\n\n\t\t// If this was the last alloc, and we have space in it's block, keep same spot and just\n\t\t// increase the offset.\n\t\tif a.last_alloc == old_memory {\n\t\t\tneeded := size - old_size\n\t\t\tgot    := a.curr.size - a.curr.offset\n\t\t\tif needed <= got {\n\t\t\t\ta.curr.offset += needed\n\t\t\t\treturn mem.byte_slice(old_memory, size), nil\n\t\t\t}\n\t\t}\n\n\t\t// Resize with older than last allocation or doesn't fit in block, need to allocate new mem.\n\t\tbytes = allocator_alloc_non_zerod(a, size, alignment, loc) or_return\n\t\tcopy(bytes, mem.byte_slice(old_memory, old_size))\n\t\treturn\n\n\tcase .Query_Features:\n\t\tset := (^mem.Allocator_Mode_Set)(old_memory)\n\t\tif set != nil {\n\t\t\tset^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}\n\t\t}\n\t\treturn nil, nil\n\n\tcase .Query_Info:\n\t\treturn nil, .Mode_Not_Implemented\n\n\tcase: unreachable()\n\t}\n}\n\nallocator_new_block :: proc(a: ^Allocator, min_size: int, alignment: int, loc := #caller_location) -> (b: ^Block, err: mem.Allocator_Error) {\n\tbase_offset := max(alignment, size_of(Block))\n\ttotal       := max(a.cap, min_size + base_offset)\n\ta.cap       *= 2\n\n\tassert_has_td(loc)\n\tif bucket, has_bucket := &td.free_temp_blocks[total]; has_bucket {\n\t\tif block, has_block := queue.pop_back_safe(bucket); has_block {\n\t\t\tb = block\n\t\t\ttd.free_temp_blocks_count -= 1\n\t\t}\n\t}\n\n\tif b == nil {\n\t\tdata := mem.alloc(total, 
max(16, align_of(Block)), a.parent, loc) or_return\n\t\tb     = (^Block)(data)\n\t}\n\n\tb.total_size = total\n\tb.size       = total - base_offset\n\tb.offset     = base_offset\n\tb.prev       = a.curr\n\ta.curr       = b\n\treturn\n}\n\nallocator_alloc_zerod :: proc(a: ^Allocator, size: int, alignment: int, loc := #caller_location) -> (bytes: []byte, err: mem.Allocator_Error) {\n\tbytes, err = allocator_alloc_non_zerod(a, size, alignment, loc)\n\tmem.zero_slice(bytes)\n\treturn\n}\n\nallocator_alloc_non_zerod :: proc(a: ^Allocator, size: int, alignment: int, loc := #caller_location) -> (bytes: []byte, err: mem.Allocator_Error) {\n\tif size == 0 { return }\n\n\tblock := a.curr\n\tdata := ([^]byte)(&block.data)\n\n\tassert(block != nil, \"you must initialize the allocator first\", loc)\n\tassert(alignment & (alignment-1) == 0, \"non-power of two alignment\", loc)\n\n\t// TODO: handle int overflows.\n\n\tneeded := int(mem.align_forward_uint(uint(size), uint(alignment)))\n\tif block.offset + needed > block.size {\n\t\tblock = allocator_new_block(a, needed, alignment, loc) or_return\n\t\tdata  = ([^]byte)(&block.data)\n\t}\n\n\talignment_offset := 0; {\n\t\tptr  := uintptr(data[block.offset:])\n\t\tmask := uintptr(alignment-1)\n\t\tif ptr & mask != 0 {\n\t\t\talignment_offset = int(uintptr(alignment) - (ptr & mask))\n\t\t}\n\t}\n\n\tblock.offset += alignment_offset\n\tbytes = data[block.offset:][:size]\n\tblock.offset += size\n\ta.last_alloc = raw_data(bytes)\n\treturn\n}\n\nallocator_free_all :: proc(a: ^Allocator, loc := #caller_location) -> (blocks: int, total_size: int, total_used: int) {\n\tblocks += 1\n\ttotal_size += a.curr.size + size_of(Block)\n\ttotal_used += a.curr.offset\n\n\tfor a.curr.prev != nil {\n\t\tblock      := a.curr\n\t\tblocks     += 1\n\t\ttotal_size += block.total_size\n\t\ttotal_used += block.offset\n\t\ta.curr      = block.prev.?\n\t\tallocator_free_block(a, block, loc)\n\t}\n\n\ta.curr.offset = 0\n\ta.cap = 
initial_block_cap\n\treturn\n}\n\nallocator_destroy :: proc(a: ^Allocator, loc := #caller_location) {\n\tallocator_free_all(a, loc)\n\tallocator_free_block(a, a.curr, loc)\n}\n\nallocator_free_block :: proc(a: ^Allocator, b: ^Block, loc := #caller_location) {\n\tassert_has_td(loc)\n\n\tif td.free_temp_blocks_count > max_free_blocks_queued {\n\t\tfree(b, a.parent)\n\t\tlog.debug(\"max temp blocks reached, freeing the block\")\n\t\treturn\n\t}\n\n\tbucket, is_initialized := &td.free_temp_blocks[b.total_size]\n\tif !is_initialized {\n\t\ttd.free_temp_blocks[b.total_size] = {}\n\t\tbucket = &td.free_temp_blocks[b.total_size]\n\t\tqueue.init(bucket, max_free_blocks_queued, allocator=td.free_temp_blocks.allocator)\n\t}\n\n\tb.prev = nil\n\tqueue.push(bucket, b)\n\ttd.free_temp_blocks_count += 1\n}\n\nimport \"core:testing\"\n\n@(test)\ntest_allocator_alignment_boundary :: proc(t: ^testing.T) {\n\tarena: Allocator\n\tallocator_init(&arena)\n\tcontext.allocator = allocator(&arena)\n\n\t_, _ = mem.alloc(int(arena.cap)-120)\n\t_, err := mem.alloc(112, 32)\n\ttesting.expect_value(t, err, nil)\n}\n\n@(test)\ntest_temp_allocator_big_alloc_and_alignment :: proc(t: ^testing.T) {\n\tarena: Allocator\n\tallocator_init(&arena)\n\tcontext.allocator = allocator(&arena)\n\n\tmappy: map[[8]int]int\n\terr := reserve(&mappy, 50000)\n\ttesting.expect_value(t, err, nil)\n}\n\n@(test)\ntest_temp_allocator_returns_correct_size :: proc(t: ^testing.T) {\n\tarena: Allocator\n\tallocator_init(&arena)\n\tcontext.allocator = allocator(&arena)\n\n\tbytes, err := mem.alloc_bytes(10, 16)\n\ttesting.expect_value(t, err, nil)\n\ttesting.expect_value(t, len(bytes), 10)\n}\n"
  },
  {
    "path": "body.odin",
    "content": "package http\n\nimport \"core:bufio\"\nimport \"core:io\"\nimport \"core:log\"\nimport \"core:net\"\nimport \"core:strconv\"\nimport \"core:strings\"\n\nBody :: string\n\nBody_Callback :: #type proc(user_data: rawptr, body: Body, err: Body_Error)\n\nBody_Error :: bufio.Scanner_Error\n\n/*\nRetrieves the request's body.\n\nIf the request has the chunked Transfer-Encoding header set, the chunks are all read and returned.\nOtherwise, the Content-Length header is used to determine what to read and return it.\n\n`max_length` can be used to set a maximum amount of bytes we try to read, once it goes over this,\nan error is returned.\n\nDo not call this more than once.\n\n**Tip** If an error is returned, easily respond with an appropriate error code like this, `http.respond(res, http.body_error_status(err))`.\n*/\nbody :: proc(req: ^Request, max_length: int = -1, user_data: rawptr, cb: Body_Callback) {\n\tassert(req._body_ok == nil, \"you can only call body once per request\")\n\n\tenc_header, ok := headers_get_unsafe(req.headers, \"transfer-encoding\")\n\tif ok && strings.has_suffix(enc_header, \"chunked\") {\n\t\t_body_chunked(req, max_length, user_data, cb)\n\t} else {\n\t\t_body_length(req, max_length, user_data, cb)\n\t}\n}\n\n/*\nParses a URL encoded body, aka bodies with the 'Content-Type: application/x-www-form-urlencoded'.\n\nKey&value pairs are percent decoded and put in a map.\n*/\nbody_url_encoded :: proc(plain: Body, allocator := context.temp_allocator) -> (res: map[string]string, ok: bool) {\n\n\tinsert :: proc(m: ^map[string]string, plain: string, keys: int, vals: int, end: int, allocator := context.temp_allocator) -> bool {\n\t\thas_value := vals != -1\n\t\tkey_end   := vals - 1 if has_value else end\n\t\tkey       := plain[keys:key_end]\n\t\tval       := plain[vals:end] if has_value else \"\"\n\n\t\t// PERF: this could be a hot spot and I don't like that we allocate the decoded key and value here.\n\t\tkeye := (net.percent_decode(key, 
allocator) or_return) if strings.index_byte(key, '%') > -1 else key\n\t\tvale := (net.percent_decode(val, allocator) or_return) if has_value && strings.index_byte(val, '%') > -1 else val\n\n\t\tm[keye] = vale\n\t\treturn true\n\t}\n\n\tcount := 1\n\tfor b in plain {\n\t\tif b == '&' { count += 1 }\n\t}\n\n\tqueries := make(map[string]string, count, allocator)\n\n\tkeys := 0\n\tvals := -1\n\tfor b, i in plain {\n\t\tswitch b {\n\t\tcase '=':\n\t\t\tvals = i + 1\n\t\tcase '&':\n\t\t\tinsert(&queries, plain, keys, vals, i) or_return\n\t\t\tkeys = i + 1\n\t\t\tvals = -1\n\t\t}\n\t}\n\n\tinsert(&queries, plain, keys, vals, len(plain)) or_return\n\n\treturn queries, true\n}\n\n// Returns an appropriate status code for the given body error.\nbody_error_status :: proc(e: Body_Error) -> Status {\n\tswitch t in e {\n\tcase bufio.Scanner_Extra_Error:\n\t\tswitch t {\n\t\tcase .Too_Long:                            return .Payload_Too_Large\n\t\tcase .Too_Short, .Bad_Read_Count:          return .Bad_Request\n\t\tcase .Negative_Advance, .Advanced_Too_Far: return .Internal_Server_Error\n\t\tcase .None:                                return .OK\n\t\tcase:\n\t\t\treturn .Internal_Server_Error\n\t\t}\n\tcase io.Error:\n\t\tswitch t {\n\t\tcase .EOF, .Unknown, .No_Progress, .Unexpected_EOF:\n\t\t\treturn .Bad_Request\n\t\tcase .Empty, .Short_Write, .Buffer_Full, .Short_Buffer,\n\t\t     .Invalid_Write, .Negative_Read, .Invalid_Whence, .Invalid_Offset,\n\t\t     .Invalid_Unread, .Negative_Write, .Negative_Count,\n\t\t     .Permission_Denied, .No_Size, .Closed:\n\t\t\treturn .Internal_Server_Error\n\t\tcase .None:\n\t\t\treturn .OK\n\t\tcase:\n\t\t\treturn .Internal_Server_Error\n\t\t}\n\tcase: unreachable()\n\t}\n}\n\n\n// \"Decodes\" a request body based on the content length header.\n// Meant for internal usage, you should use `http.request_body`.\n_body_length :: proc(req: ^Request, max_length: int = -1, user_data: rawptr, cb: Body_Callback) {\n\treq._body_ok = false\n\n\tlen, ok 
:= headers_get_unsafe(req.headers, \"content-length\")\n\tif !ok {\n\t\tcb(user_data, \"\", nil)\n\t\treturn\n\t}\n\n\tilen, lenok := strconv.parse_int(len, 10)\n\tif !lenok {\n\t\tcb(user_data, \"\", .Bad_Read_Count)\n\t\treturn\n\t}\n\n\tif max_length > -1 && ilen > max_length {\n\t\tcb(user_data, \"\", .Too_Long)\n\t\treturn\n\t}\n\n\tif ilen == 0 {\n\t\treq._body_ok = true\n\t\tcb(user_data, \"\", nil)\n\t\treturn\n\t}\n\n\treq._scanner.max_token_size = ilen\n\n\treq._scanner.split          = scan_num_bytes\n\treq._scanner.split_data     = rawptr(uintptr(ilen))\n\n\treq._body_ok = true\n\tscanner_scan(req._scanner, user_data, cb)\n}\n\n/*\n\"Decodes\" a chunked transfer encoded request body.\nMeant for internal usage, you should use `http.request_body`.\n\nPERF: this could be made non-allocating by writing over the part of the body that contains the\nmetadata with the rest of the body, and then returning a slice of that, but it is some effort and\nI don't think this functionality of HTTP is used that much anyway.\n\nRFC 7230 4.1.3 pseudo-code:\n\nlength := 0\nread chunk-size, chunk-ext (if any), and CRLF\nwhile (chunk-size > 0) {\n   read chunk-data and CRLF\n   append chunk-data to decoded-body\n   length := length + chunk-size\n   read chunk-size, chunk-ext (if any), and CRLF\n}\nread trailer field\nwhile (trailer field is not empty) {\n   if (trailer field is allowed to be sent in a trailer) {\n   \tappend trailer field to existing header fields\n   }\n   read trailer-field\n}\nContent-Length := length\nRemove \"chunked\" from Transfer-Encoding\nRemove Trailer from existing header fields\n*/\n_body_chunked :: proc(req: ^Request, max_length: int = -1, user_data: rawptr, cb: Body_Callback) {\n\treq._body_ok = false\n\n\ton_scan :: proc(s: rawptr, size_line: string, err: bufio.Scanner_Error) {\n\t\ts := cast(^Chunked_State)s\n\t\tsize_line := size_line\n\n\t\tif err != nil {\n\t\t\ts.cb(s.user_data, \"\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// If there is a 
semicolon, discard everything after it,\n\t\t// that would be chunk extensions which we currently have no interest in.\n\t\tif semi := strings.index_byte(size_line, ';'); semi > -1 {\n\t\t\tsize_line = size_line[:semi]\n\t\t}\n\n\t\tsize, ok := strconv.parse_int(string(size_line), 16)\n\t\tif !ok {\n\t\t\tlog.infof(\"Encountered an invalid chunk size when decoding a chunked body: %q\", string(size_line))\n\t\t\ts.cb(s.user_data, \"\", .Bad_Read_Count)\n\t\t\treturn\n\t\t}\n\n\t\t// start scanning trailer headers.\n\t\tif size == 0 {\n\t\t\tscanner_scan(s.req._scanner, s, on_scan_trailer)\n\t\t\treturn\n\t\t}\n\n\t\tif s.max_length > -1 && strings.builder_len(s.buf) + size > s.max_length {\n\t\t\ts.cb(s.user_data, \"\", .Too_Long)\n\t\t\treturn\n\t\t}\n\n\t\ts.req._scanner.max_token_size = size\n\n\t\ts.req._scanner.split          = scan_num_bytes\n\t\ts.req._scanner.split_data     = rawptr(uintptr(size))\n\n\t\tscanner_scan(s.req._scanner, s, on_scan_chunk)\n\t}\n\n\ton_scan_chunk :: proc(s: rawptr, token: string, err: bufio.Scanner_Error) {\n\t\ts := cast(^Chunked_State)s\n\n\t\tif err != nil {\n\t\t\ts.cb(s.user_data, \"\", err)\n\t\t\treturn\n\t\t}\n\n\t\ts.req._scanner.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE\n\t\ts.req._scanner.split          = scan_lines\n\n\t\tstrings.write_string(&s.buf, token)\n\n\t\ton_scan_empty_line :: proc(s: rawptr, token: string, err: bufio.Scanner_Error) {\n\t\t\ts := cast(^Chunked_State)s\n\n\t\t\tif err != nil {\n\t\t\t\ts.cb(s.user_data, \"\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert(len(token) == 0)\n\n\t\t\tscanner_scan(s.req._scanner, s, on_scan)\n\t\t}\n\n\t\tscanner_scan(s.req._scanner, s, on_scan_empty_line)\n\t}\n\n\ton_scan_trailer :: proc(s: rawptr, line: string, err: bufio.Scanner_Error) {\n\t\ts := cast(^Chunked_State)s\n\n\t\t// Headers are done, success.\n\t\tif err != nil || len(line) == 0 {\n\t\t\theaders_delete_unsafe(&s.req.headers, \"trailer\")\n\n\t\t\tte_header := headers_get_unsafe(s.req.headers, 
\"transfer-encoding\")\n\t\t\tnew_te_header := strings.trim_suffix(te_header, \"chunked\")\n\n\t\t\ts.req.headers.readonly = false\n\t\t\theaders_set_unsafe(&s.req.headers, \"transfer-encoding\", new_te_header)\n\t\t\ts.req.headers.readonly = true\n\n\t\t\ts.req._body_ok = true\n\t\t\ts.cb(s.user_data, strings.to_string(s.buf), nil)\n\t\t\treturn\n\t\t}\n\n\t\tkey, ok := header_parse(&s.req.headers, string(line))\n\t\tif !ok {\n\t\t\tlog.infof(\"Invalid header when decoding chunked body: %q\", string(line))\n\t\t\ts.cb(s.user_data, \"\", .Unknown)\n\t\t\treturn\n\t\t}\n\n\t\t// A recipient MUST ignore (or consider as an error) any fields that are forbidden to be sent in a trailer.\n\t\tif !header_allowed_trailer(key) {\n\t\t\tlog.infof(\"Invalid trailer header received, discarding it: %q\", key)\n\t\t\theaders_delete(&s.req.headers, key)\n\t\t}\n\n\t\tscanner_scan(s.req._scanner, s, on_scan_trailer)\n\t}\n\n\tChunked_State :: struct {\n\t\treq:        ^Request,\n\t\tmax_length: int,\n\t\tuser_data:  rawptr,\n\t\tcb:         Body_Callback,\n\n\t\tbuf:        strings.Builder,\n\t}\n\n\ts := new(Chunked_State, context.temp_allocator)\n\n\ts.buf.buf.allocator = context.temp_allocator\n\n\ts.req        = req\n\ts.max_length = max_length\n\ts.user_data  = user_data\n\ts.cb         = cb\n\n\ts.req._scanner.split = scan_lines\n\tscanner_scan(s.req._scanner, s, on_scan)\n}\n"
  },
  {
    "path": "client/client.odin",
    "content": "// package provides a very simple (for now) HTTP/1.1 client.\npackage client\n\nimport \"core:bufio\"\nimport \"core:bytes\"\nimport \"core:c\"\nimport \"core:encoding/json\"\nimport \"core:io\"\nimport \"core:log\"\nimport \"core:net\"\nimport \"core:strconv\"\nimport \"core:strings\"\n\nimport http \"..\"\nimport openssl \"../openssl\"\n\nRequest :: struct {\n\tmethod:  http.Method,\n\theaders: http.Headers,\n\tcookies: [dynamic]http.Cookie,\n\tbody:    bytes.Buffer,\n}\n\n// Initializes the request with sane defaults using the given allocator.\nrequest_init :: proc(r: ^Request, method := http.Method.Get, allocator := context.allocator) {\n\tr.method = method\n\thttp.headers_init(&r.headers, allocator)\n\tr.cookies = make([dynamic]http.Cookie, allocator)\n\tbytes.buffer_init_allocator(&r.body, 0, 0, allocator)\n}\n\n// Destroys the request.\n// Header keys and values that the user added will have to be deleted by the user.\n// Same with any strings inside the cookies.\nrequest_destroy :: proc(r: ^Request) {\n\tdelete(r.headers._kv)\n\tdelete(r.cookies)\n\tbytes.buffer_destroy(&r.body)\n}\n\nwith_json :: proc(r: ^Request, v: any, opt: json.Marshal_Options = {}) -> json.Marshal_Error {\n\tif r.method == .Get { r.method = .Post }\n\thttp.headers_set_content_type(&r.headers, http.mime_to_content_type(.Json))\n\n\tstream := bytes.buffer_to_stream(&r.body)\n\topt := opt\n\tjson.marshal_to_writer(io.to_writer(stream), v, &opt) or_return\n\treturn nil\n}\n\nget :: proc(target: string, allocator := context.allocator) -> (Response, Error) {\n\tr: Request\n\trequest_init(&r, .Get, allocator)\n\tdefer request_destroy(&r)\n\n\treturn request(&r, target, allocator)\n}\n\nRequest_Error :: enum {\n\tOk,\n\tInvalid_Response_HTTP_Version,\n\tInvalid_Response_Method,\n\tInvalid_Response_Header,\n\tInvalid_Response_Cookie,\n}\n\nSSL_Error :: enum {\n\tOk,\n\tControlled_Shutdown,\n\tFatal_Shutdown,\n\tSSL_Write_Failed,\n}\n\nError :: union #shared_nil 
{\n\tnet.Dial_Error,\n\tnet.Parse_Endpoint_Error,\n\tnet.Network_Error,\n\tnet.TCP_Send_Error,\n\tbufio.Scanner_Error,\n\tRequest_Error,\n\tSSL_Error,\n}\n\nrequest :: proc(request: ^Request, target: string, allocator := context.allocator) -> (res: Response, err: Error) {\n\turl, endpoint := parse_endpoint(target) or_return\n\n\t// NOTE: we don't support persistent connections yet.\n\thttp.headers_set_close(&request.headers)\n\n\treq_buf := format_request(url, request, allocator)\n\tdefer bytes.buffer_destroy(&req_buf)\n\n\tsocket := net.dial_tcp(endpoint) or_return\n\n\t// HTTPS using openssl.\n\tif url.scheme == \"https\" {\n\t\tctx := openssl.SSL_CTX_new(openssl.TLS_client_method())\n\t\tssl := openssl.SSL_new(ctx)\n\t\topenssl.SSL_set_fd(ssl, c.int(socket))\n\n\t\t// For servers using SNI for SSL certs (like cloudflare), this needs to be set.\n\t\tchostname := strings.clone_to_cstring(url.host, allocator)\n\t\tdefer delete(chostname, allocator)\n\t\topenssl.SSL_set_tlsext_host_name(ssl, chostname)\n\n\t\tswitch openssl.SSL_connect(ssl) {\n\t\tcase 2:\n\t\t\terr = SSL_Error.Controlled_Shutdown\n\t\t\treturn\n\t\tcase 1: // success\n\t\tcase:\n\t\t\terr = SSL_Error.Fatal_Shutdown\n\t\t\treturn\n\t\t}\n\n\t\tbuf := bytes.buffer_to_bytes(&req_buf)\n\t\tto_write := len(buf)\n\t\tfor to_write > 0 {\n\t\t\tret := openssl.SSL_write(ssl, raw_data(buf), c.int(to_write))\n\t\t\tif ret <= 0 {\n\t\t\t\terr = SSL_Error.SSL_Write_Failed\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tto_write -= int(ret)\n\t\t}\n\n\t\treturn parse_response(SSL_Communication{ssl = ssl, ctx = ctx, socket = socket}, allocator)\n\t}\n\n\t// HTTP, just send the request.\n\tnet.send_tcp(socket, bytes.buffer_to_bytes(&req_buf)) or_return\n\treturn parse_response(socket, allocator)\n}\n\nResponse :: struct {\n\tstatus:    http.Status,\n\t// headers and cookies should be considered read-only, after a response is returned.\n\theaders:   http.Headers,\n\tcookies:   [dynamic]http.Cookie,\n\t_socket:   
Communication,\n\t_body:     bufio.Scanner,\n\t_body_err: Body_Error,\n}\n\n// Frees the response, closes the connection.\n// Optionally pass the response_body returned 'body' and 'was_allocation' to destroy it too.\nresponse_destroy :: proc(res: ^Response, body: Maybe(Body_Type) = nil, was_allocation := false, body_allocator := context.allocator) {\n\t// Header keys are allocated, values are slices into the body.\n\t// NOTE: this is fine because we don't add any headers with `headers_set_unsafe()`.\n\t// If we did, we wouldn't know if the key was allocated or a literal.\n\t// We also set the headers to readonly before giving them to the user so they can't add any either.\n\tfor k, v in res.headers._kv {\n\t\tdelete(v, res.headers._kv.allocator)\n\t\tdelete(k, res.headers._kv.allocator)\n\t}\n\n\tdelete(res.headers._kv)\n\n\tbufio.scanner_destroy(&res._body)\n\n\tfor cookie in res.cookies {\n\t\tdelete(cookie._raw)\n\t}\n\tdelete(res.cookies)\n\n\tif body != nil {\n\t\tbody_destroy(body.(Body_Type), was_allocation, body_allocator)\n\t}\n\n\t// We close now and not at the time we got the response because reading the body,\n\t// could make more reads need to happen (like with chunked encoding).\n\tswitch comm in res._socket {\n\tcase net.TCP_Socket:\n\t\tnet.close(comm)\n\tcase SSL_Communication:\n\t\topenssl.SSL_free(comm.ssl)\n\t\topenssl.SSL_CTX_free(comm.ctx)\n\t\tnet.close(comm.socket)\n\t}\n}\n\nBody_Error :: enum {\n\tNone,\n\tNo_Length,\n\tInvalid_Length,\n\tToo_Long,\n\tScan_Failed,\n\tInvalid_Chunk_Size,\n\tInvalid_Trailer_Header,\n}\n\n// Any non-special body, could have been a chunked body that has been read in fully automatically.\n// Depending on the return value for 'was_allocation' of the parse function, this is either an\n// allocated string that you should delete or a slice into the body.\nBody_Plain :: string\n\n// A URL encoded body, map, keys and values are fully allocated on the allocator given to the parsing function,\n// And should be deleted 
by you.\nBody_Url_Encoded :: map[string]string\n\nBody_Type :: union #no_nil {\n\tBody_Plain,\n\tBody_Url_Encoded,\n\tBody_Error, // TODO: why is this here if we also return an error?\n}\n\n// Frees the memory allocated by parsing the body.\n// was_allocation is returned by the body parsing procedure.\nbody_destroy :: proc(body: Body_Type, was_allocation: bool, allocator := context.allocator) {\n\tswitch b in body {\n\tcase Body_Plain:\n\t\tif was_allocation { delete(b, allocator) }\n\tcase Body_Url_Encoded:\n\t\tfor k, v in b {\n\t\t\tdelete(k, b.allocator)\n\t\t\tdelete(v, b.allocator)\n\t\t}\n\t\tdelete(b)\n\tcase Body_Error:\n\t}\n}\n\n// Retrieves the response's body, can only be called once.\n// Free the returned body using body_destroy().\nresponse_body :: proc(\n\tres: ^Response,\n\tmax_length := -1,\n\tallocator := context.allocator,\n) -> (\n\tbody: Body_Type,\n\twas_allocation: bool,\n\terr: Body_Error,\n) {\n\tdefer res._body_err = err\n\tassert(res._body_err == nil)\n\tbody, was_allocation, err = _parse_body(&res.headers, &res._body, max_length, allocator)\n\treturn\n}\n\n_parse_body :: proc(\n\theaders: ^http.Headers,\n\t_body: ^bufio.Scanner,\n\tmax_length := -1,\n\tallocator := context.allocator,\n) -> (\n\tbody: Body_Type,\n\twas_allocation: bool,\n\terr: Body_Error,\n) {\n\t// See [RFC 7230 3.3.3](https://www.rfc-editor.org/rfc/rfc7230#section-3.3.3) for the rules.\n\t// Point 3 paragraph 3 and point 4 are handled before we get here.\n\n\tenc, has_enc       := http.headers_get_unsafe(headers^, \"transfer-encoding\")\n\tlength, has_length := http.headers_get_unsafe(headers^, \"content-length\")\n\tswitch {\n\tcase has_enc && strings.has_suffix(enc, \"chunked\"):\n\t\twas_allocation = true\n\t\tbody = _response_body_chunked(headers, _body, max_length, allocator) or_return\n\n\tcase has_length:\n\t\tbody = _response_body_length(_body, max_length, length) or_return\n\n\tcase:\n\t\tbody = _response_till_close(_body, max_length) or_return\n\t}\n\n\t// 
Automatically decode url encoded bodies.\n\tif typ, ok := http.headers_get_unsafe(headers^, \"content-type\"); ok && typ == \"application/x-www-form-urlencoded\" {\n\t\tplain := body.(Body_Plain)\n\t\tdefer if was_allocation { delete(plain, allocator) }\n\n\t\tkeyvalues := strings.split(plain, \"&\", allocator)\n\t\tdefer delete(keyvalues, allocator)\n\n\t\tqueries := make(Body_Url_Encoded, len(keyvalues), allocator)\n\t\tfor keyvalue in keyvalues {\n\t\t\tseperator := strings.index(keyvalue, \"=\")\n\t\t\tif seperator == -1 { \t// The keyvalue has no value.\n\t\t\t\tqueries[keyvalue] = \"\"\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tkey, key_decoded_ok := net.percent_decode(keyvalue[:seperator], allocator)\n\t\t\tif !key_decoded_ok {\n\t\t\t\tlog.warnf(\"url encoded body key %q could not be decoded\", keyvalue[:seperator])\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tval, val_decoded_ok := net.percent_decode(keyvalue[seperator + 1:], allocator)\n\t\t\tif !val_decoded_ok {\n\t\t\t\tlog.warnf(\"url encoded body value %q for key %q could not be decoded\", keyvalue[seperator + 1:], key)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tqueries[key] = val\n\t\t}\n\n\t\tbody = queries\n\t}\n\n\treturn\n}\n\n_response_till_close :: proc(_body: ^bufio.Scanner, max_length: int) -> (string, Body_Error) {\n\t_body.max_token_size = max_length\n\tdefer _body.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE\n\n\t_body.split = proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: bufio.Scanner_Error, final_token: bool) {\n\t\tif at_eof {\n\t\t\treturn len(data), data, nil, true\n\t\t}\n\n\t\treturn\n\t}\n\tdefer _body.split = bufio.scan_lines\n\n\tif !bufio.scanner_scan(_body) {\n\t\tif bufio.scanner_error(_body) == .Too_Long {\n\t\t\treturn \"\", .Too_Long\n\t\t}\n\n\t\treturn \"\", .Scan_Failed\n\t}\n\n\treturn bufio.scanner_text(_body), .None\n}\n\n// \"Decodes\" a response body based on the content length header.\n// Meant for internal usage, you should use 
`client.response_body`.\n_response_body_length :: proc(_body: ^bufio.Scanner, max_length: int, len: string) -> (string, Body_Error) {\n\tilen, lenok := strconv.parse_int(len, 10)\n\tif !lenok {\n\t\treturn \"\", .Invalid_Length\n\t}\n\n\tif max_length > -1 && ilen > max_length {\n\t\treturn \"\", .Too_Long\n\t}\n\n\tif ilen == 0 {\n\t\treturn \"\", nil\n\t}\n\n\t// user_index is used to set the amount of bytes to scan in scan_num_bytes.\n\tcontext.user_index = ilen\n\n\t_body.max_token_size = ilen\n\tdefer _body.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE\n\n\t_body.split = scan_num_bytes\n\tdefer _body.split = bufio.scan_lines\n\n\tlog.debugf(\"scanning %i bytes body\", ilen)\n\n\tif !bufio.scanner_scan(_body) {\n\t\treturn \"\", .Scan_Failed\n\t}\n\n\treturn bufio.scanner_text(_body), .None\n}\n\n// \"Decodes\" a chunked transfer encoded request body.\n// Meant for internal usage, you should use `client.response_body`.\n//\n// RFC 7230 4.1.3 pseudo-code:\n//\n// length := 0\n// read chunk-size, chunk-ext (if any), and CRLF\n// while (chunk-size > 0) {\n//    read chunk-data and CRLF\n//    append chunk-data to decoded-body\n//    length := length + chunk-size\n//    read chunk-size, chunk-ext (if any), and CRLF\n// }\n// read trailer field\n// while (trailer field is not empty) {\n//    if (trailer field is allowed to be sent in a trailer) {\n//    \tappend trailer field to existing header fields\n//    }\n//    read trailer-field\n// }\n// Content-Length := length\n// Remove \"chunked\" from Transfer-Encoding\n// Remove Trailer from existing header fields\n_response_body_chunked :: proc(\n\theaders: ^http.Headers,\n\t_body: ^bufio.Scanner,\n\tmax_length: int,\n\tallocator := context.allocator,\n) -> (\n\tbody: string,\n\terr: Body_Error,\n) {\n\tbody_buff: bytes.Buffer\n\n\tbytes.buffer_init_allocator(&body_buff, 0, 0, allocator)\n\tdefer if err != nil { bytes.buffer_destroy(&body_buff) }\n\n\tfor {\n\t\tif !bufio.scanner_scan(_body) {\n\t\t\treturn \"\", 
.Scan_Failed\n\t\t}\n\n\t\tsize_line := bufio.scanner_bytes(_body)\n\n\t\t// If there is a semicolon, discard everything after it,\n\t\t// that would be chunk extensions which we currently have no interest in.\n\t\tif semi := bytes.index_byte(size_line, ';'); semi > -1 {\n\t\t\tsize_line = size_line[:semi]\n\t\t}\n\n\t\tsize, ok := strconv.parse_int(string(size_line), 16)\n\t\tif !ok {\n\t\t\terr = .Invalid_Chunk_Size\n\t\t\treturn\n\t\t}\n\t\tif size == 0 { break }\n\n\t\tif max_length > -1 && bytes.buffer_length(&body_buff) + size > max_length {\n\t\t\treturn \"\", .Too_Long\n\t\t}\n\n\t\t// user_index is used to set the amount of bytes to scan in scan_num_bytes.\n\t\tcontext.user_index = size\n\n\t\t_body.max_token_size = size\n\t\t_body.split = scan_num_bytes\n\n\t\tif !bufio.scanner_scan(_body) {\n\t\t\treturn \"\", .Scan_Failed\n\t\t}\n\n\t\t_body.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE\n\t\t_body.split = bufio.scan_lines\n\n\t\tbytes.buffer_write(&body_buff, bufio.scanner_bytes(_body))\n\n\t\t// Read empty line after chunk.\n\t\tif !bufio.scanner_scan(_body) {\n\t\t\treturn \"\", .Scan_Failed\n\t\t}\n\t\tassert(bufio.scanner_text(_body) == \"\")\n\t}\n\n\t// Read trailing empty line (after body, before trailing headers).\n\tif !bufio.scanner_scan(_body) || bufio.scanner_text(_body) != \"\" {\n\t\treturn \"\", .Scan_Failed\n\t}\n\n\t// Keep parsing the request as line delimited headers until we get to an empty line.\n\tfor {\n\t\t// If there are no trailing headers, this case is hit.\n\t\tif !bufio.scanner_scan(_body) {\n\t\t\tbreak\n\t\t}\n\n\t\tline := bufio.scanner_text(_body)\n\n\t\t// The first empty line denotes the end of the headers section.\n\t\tif line == \"\" {\n\t\t\tbreak\n\t\t}\n\n\t\tkey, ok := http.header_parse(headers, line)\n\t\tif !ok {\n\t\t\treturn \"\", .Invalid_Trailer_Header\n\t\t}\n\n\t\t// A recipient MUST ignore (or consider as an error) any fields that are forbidden to be sent in a trailer.\n\t\tif 
!http.header_allowed_trailer(key) {\n\t\t\thttp.headers_delete(headers, key)\n\t\t}\n\t}\n\n\tif http.headers_has_unsafe(headers^, \"trailer\") {\n\t\thttp.headers_delete_unsafe(headers, \"trailer\")\n\t}\n\n\tte := strings.trim_suffix(http.headers_get_unsafe(headers^, \"transfer-encoding\"), \"chunked\")\n\n\theaders.readonly = false\n\thttp.headers_set_unsafe(headers, \"transfer-encoding\", te)\n\theaders.readonly = true\n\n\treturn bytes.buffer_to_string(&body_buff), .None\n}\n\n// A scanner bufio.Split_Proc implementation to scan a given amount of bytes.\n// The amount of bytes should be set in the context.user_index.\n@(private)\nscan_num_bytes :: proc(\n\tdata: []byte,\n\tat_eof: bool,\n) -> (\n\tadvance: int,\n\ttoken: []byte,\n\terr: bufio.Scanner_Error,\n\tfinal_token: bool,\n) {\n\tn := context.user_index // Set context.user_index to the amount of bytes to read.\n\tif at_eof && len(data) < n {\n\t\treturn\n\t}\n\n\tif len(data) < n {\n\t\treturn\n\t}\n\n\treturn n, data[:n], nil, false\n}\n"
  },
  {
    "path": "client/communication.odin",
    "content": "#+private\npackage client\n\nimport \"core:bufio\"\nimport \"core:bytes\"\nimport \"core:c\"\nimport \"core:io\"\nimport \"core:log\"\nimport \"core:net\"\nimport \"core:strconv\"\nimport \"core:strings\"\n\nimport http \"..\"\nimport openssl \"../openssl\"\n\nparse_endpoint :: proc(target: string) -> (url: http.URL, endpoint: net.Endpoint, err: net.Network_Error) {\n\turl = http.url_parse(target)\n\thost_or_endpoint := net.parse_hostname_or_endpoint(url.host) or_return\n\n\tswitch t in host_or_endpoint {\n\tcase net.Endpoint:\n\t\tendpoint = t\n\t\treturn\n\tcase net.Host:\n\t\tep4, ep6 := net.resolve(t.hostname) or_return\n\t\tendpoint = ep4 if ep4.address != nil else ep6\n\n\t\tendpoint.port = t.port\n\t\tif endpoint.port == 0 {\n\t\t\tendpoint.port = url.scheme == \"https\" ? 443 : 80\n\t\t}\n\t\treturn\n\tcase:\n\t\tunreachable()\n\t}\n}\n\nformat_request :: proc(target: http.URL, request: ^Request, allocator := context.allocator) -> (buf: bytes.Buffer) {\n\t// Responses are on average at least 100 bytes, so lets start there, but add the body's length.\n\tbytes.buffer_init_allocator(&buf, 0, bytes.buffer_length(&request.body) + 100, allocator)\n\n\thttp.requestline_write(\n\t\tbytes.buffer_to_stream(&buf),\n\t\t{method = request.method, target = target, version = http.Version{1, 1}},\n\t)\n\n\tif !http.headers_has_unsafe(request.headers, \"content-length\") {\n\t\tbuf_len := bytes.buffer_length(&request.body)\n\t\tif buf_len == 0 {\n\t\t\tbytes.buffer_write_string(&buf, \"content-length: 0\\r\\n\")\n\t\t} else {\n\t\t\tbytes.buffer_write_string(&buf, \"content-length: \")\n\n\t\t\t// Make sure at least 20 bytes are there to write into, should be enough for the content length.\n\t\t\tbytes.buffer_grow(&buf, buf_len + 20)\n\n\t\t\t// Write the length into unwritten portion.\n\t\t\tunwritten := http._dynamic_unwritten(buf.buf)\n\t\t\tl := len(strconv.write_int(unwritten, i64(buf_len), 10))\n\t\t\tassert(l <= 
20)\n\t\t\thttp._dynamic_add_len(&buf.buf, l)\n\n\t\t\tbytes.buffer_write_string(&buf, \"\\r\\n\")\n\t\t}\n\t}\n\n\tif !http.headers_has_unsafe(request.headers, \"accept\") {\n\t\tbytes.buffer_write_string(&buf, \"accept: */*\\r\\n\")\n\t}\n\n\tif !http.headers_has_unsafe(request.headers, \"user-agent\") {\n\t\tbytes.buffer_write_string(&buf, \"user-agent: odin-http\\r\\n\")\n\t}\n\n\tif !http.headers_has_unsafe(request.headers, \"host\") {\n\t\tbytes.buffer_write_string(&buf, \"host: \")\n\t\tbytes.buffer_write_string(&buf, target.host)\n\t\tbytes.buffer_write_string(&buf, \"\\r\\n\")\n\t}\n\n\tfor header, value in request.headers._kv {\n\t\tbytes.buffer_write_string(&buf, header)\n\t\tbytes.buffer_write_string(&buf, \": \")\n\n\t\t// Escape newlines in headers, if we don't, an attacker can find an endpoint\n\t\t// that returns a header with user input, and inject headers into the response.\n\t\tesc_value, was_allocation := strings.replace_all(value, \"\\n\", \"\\\\n\", allocator)\n\t\tdefer if was_allocation { delete(esc_value) }\n\n\t\tbytes.buffer_write_string(&buf, esc_value)\n\t\tbytes.buffer_write_string(&buf, \"\\r\\n\")\n\t}\n\n\tif len(request.cookies) > 0 {\n\t\tbytes.buffer_write_string(&buf, \"cookie: \")\n\n\t\tfor cookie, i in request.cookies {\n\t\t\tbytes.buffer_write_string(&buf, cookie.name)\n\t\t\tbytes.buffer_write_byte(&buf, '=')\n\t\t\tbytes.buffer_write_string(&buf, cookie.value)\n\n\t\t\tif i != len(request.cookies) - 1 {\n\t\t\t\tbytes.buffer_write_string(&buf, \"; \")\n\t\t\t}\n\t\t}\n\n\t\tbytes.buffer_write_string(&buf, \"\\r\\n\")\n\t}\n\n\t// Empty line denotes end of headers and start of body.\n\tbytes.buffer_write_string(&buf, \"\\r\\n\")\n\n\tbytes.buffer_write(&buf, bytes.buffer_to_bytes(&request.body))\n\treturn\n}\n\nSSL_Communication :: struct {\n\tsocket: net.TCP_Socket,\n\tssl:    ^openssl.SSL,\n\tctx:    ^openssl.SSL_CTX,\n}\n\nCommunication :: union {\n\tnet.TCP_Socket, // HTTP.\n\tSSL_Communication, // 
HTTPS.\n}\n\nparse_response :: proc(socket: Communication, allocator := context.allocator) -> (res: Response, err: Error) {\n\tres._socket = socket\n\n\tstream: io.Stream\n\tswitch comm in socket {\n\tcase net.TCP_Socket:\n\t\tstream = tcp_stream(comm)\n\tcase SSL_Communication:\n\t\tstream = ssl_tcp_stream(comm.ssl)\n\t}\n\n\tstream_reader := io.to_reader(stream)\n\tscanner: bufio.Scanner\n\tbufio.scanner_init(&scanner, stream_reader, allocator)\n\n\thttp.headers_init(&res.headers, allocator)\n\n\tif !bufio.scanner_scan(&scanner) {\n\t\terr = bufio.scanner_error(&scanner)\n\t\treturn\n\t}\n\n\trline_str := bufio.scanner_text(&scanner)\n\tsi := strings.index_byte(rline_str, ' ')\n\n\tversion, ok := http.version_parse(rline_str[:si])\n\tif !ok {\n\t\terr = Request_Error.Invalid_Response_HTTP_Version\n\t\treturn\n\t}\n\n\t// Might need to support more versions later.\n\tif version.major != 1 {\n\t\terr = Request_Error.Invalid_Response_HTTP_Version\n\t\treturn\n\t}\n\n\tres.status, ok = http.status_from_string(rline_str[si + 1:])\n\tif !ok {\n\t\terr = Request_Error.Invalid_Response_Method\n\t\treturn\n\t}\n\n\tres.cookies.allocator = allocator\n\n\tfor {\n\t\tif !bufio.scanner_scan(&scanner) {\n\t\t\terr = bufio.scanner_error(&scanner)\n\t\t\treturn\n\t\t}\n\n\t\tline := bufio.scanner_text(&scanner)\n\t\t// Empty line means end of headers.\n\t\tif line == \"\" { break }\n\n\t\tkey, hok := http.header_parse(&res.headers, line, allocator)\n\t\tif !hok {\n\t\t\terr = Request_Error.Invalid_Response_Header\n\t\t\treturn\n\t\t}\n\n\t\tif key == \"set-cookie\" {\n\t\t\tcookie_str := http.headers_get_unsafe(res.headers, \"set-cookie\")\n\t\t\thttp.headers_delete_unsafe(&res.headers, \"set-cookie\")\n\t\t\tdelete(key, allocator)\n\n\t\t\tcookie, cok := http.cookie_parse(cookie_str, allocator)\n\t\t\tif !cok {\n\t\t\t\terr = Request_Error.Invalid_Response_Cookie\n\t\t\t\tdelete(cookie_str, allocator)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tappend(&res.cookies, 
cookie)\n\t\t}\n\t}\n\n\tif !http.headers_validate(&res.headers) {\n\t\terr = Request_Error.Invalid_Response_Header\n\t\treturn\n\t}\n\n\tres.headers.readonly = true\n\n\tres._body = scanner\n\treturn res, nil\n}\n\nssl_tcp_stream :: proc(sock: ^openssl.SSL) -> (s: io.Stream) {\n\ts.data = sock\n\ts.procedure = _ssl_stream_proc\n\treturn s\n}\n\n@(private)\n_ssl_stream_proc :: proc(\n\tstream_data: rawptr,\n\tmode: io.Stream_Mode,\n\tp: []byte,\n\toffset: i64,\n\twhence: io.Seek_From,\n) -> (\n\tn: i64,\n\terr: io.Error,\n) {\n\t#partial switch mode {\n\tcase .Query:\n\t\treturn io.query_utility(io.Stream_Mode_Set{.Query, .Read})\n\tcase .Read:\n\t\tssl := cast(^openssl.SSL)stream_data\n\t\tret := openssl.SSL_read(ssl, raw_data(p), c.int(len(p)))\n\t\tif ret <= 0 {\n\t\t\treturn 0, .Unexpected_EOF\n\t\t}\n\n\t\treturn i64(ret), nil\n\tcase:\n\t\terr = .Empty\n\t}\n\treturn\n}\n\n// Wraps a tcp socket with a stream.\ntcp_stream :: proc(sock: net.TCP_Socket) -> (s: io.Stream) {\n\ts.data = rawptr(uintptr(sock))\n\ts.procedure = _socket_stream_proc\n\treturn s\n}\n\n@(private)\n_socket_stream_proc :: proc(\n\tstream_data: rawptr,\n\tmode: io.Stream_Mode,\n\tp: []byte,\n\toffset: i64,\n\twhence: io.Seek_From,\n) -> (\n\tn: i64,\n\terr: io.Error,\n) {\n\t#partial switch mode {\n\tcase .Query:\n\t\treturn io.query_utility(io.Stream_Mode_Set{.Query, .Read})\n\tcase .Read:\n\t\tsock := net.TCP_Socket(uintptr(stream_data))\n\t\treceived, recv_err := net.recv_tcp(sock, p)\n\t\tn = i64(received)\n\n\t\t#partial switch recv_err {\n\t\tcase .None:\n\t\t\terr = .None\n\t\tcase .Network_Unreachable, .Insufficient_Resources, .Invalid_Argument, .Not_Connected, .Connection_Closed, .Timeout, .Would_Block, .Interrupted:\n\t\t\tlog.errorf(\"unexpected error reading tcp: %s\", recv_err)\n\t\t\terr = .Unexpected_EOF\n\t\tcase:\n\t\t\tlog.errorf(\"unexpected error reading tcp: %s\", recv_err)\n\t\t\terr = .Unknown\n\t\t}\n\t}\n\treturn\n}\n"
  },
  {
    "path": "comparisons/empty-ok-all/README.md",
    "content": "# Comparison - Empty OK All\n\nThis comparison measures raw IO rate; the server needs to respond to requests on port :8080 with 200 OK.\n\nOf course this is not a full picture but you can get an idea of performance.\n\n## Results\n\nTaken on Pop!_OS Linux using an AMD Ryzen 7 5800X 8-core processor.\n\nLoad is created using [Bombardier](https://github.com/codesenberg/bombardier) set to 250 connections and 10.000.000 requests.\n\nBombardier command used: `bombardier -c 250 -n 10000000 http://localhost:8080`\n\n| Language/framework | Command                                                   | Requests per second | Total time | Avg response time | Throughput |\n|--------------------|-----------------------------------------------------------|---------------------|------------|-------------------|------------|\n| Rust Actix  4.2    | `cargo build --release` (this installs 256 dependencies!) | 712k                | 14s        | 347us             | 120.8MB/s  |\n| Odin-HTTP   dev    | `odin build . -o:speed -disable-assert -no-bounds-check`  | 637k                | 15s        | 340us             | 105.2MB/s  |\n| Go net/http 1.21   | `go build main.go`                                        | 598k                | 16s        | 417us             | 77.98MB/s  |\n| Bun.serve   1.1    | `NODE_ENV=production bun run index.ts`                    | 302k                | 33s        | 827us             | 39.43MB/s  |\n| Node http   20.5   | `NODE_ENV=production node app.js`                         |  65k                | 2m35s      | 3.88ms            | 12.90MB/s  |\n"
  },
  {
    "path": "comparisons/empty-ok-all/bun/.gitignore",
    "content": "# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore\n\n# Logs\n\nlogs\n_.log\nnpm-debug.log_\nyarn-debug.log*\nyarn-error.log*\nlerna-debug.log*\n.pnpm-debug.log*\n\n# Diagnostic reports (https://nodejs.org/api/report.html)\n\nreport.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json\n\n# Runtime data\n\npids\n_.pid\n_.seed\n\\*.pid.lock\n\n# Directory for instrumented libs generated by jscoverage/JSCover\n\nlib-cov\n\n# Coverage directory used by tools like istanbul\n\ncoverage\n\\*.lcov\n\n# nyc test coverage\n\n.nyc_output\n\n# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)\n\n.grunt\n\n# Bower dependency directory (https://bower.io/)\n\nbower_components\n\n# node-waf configuration\n\n.lock-wscript\n\n# Compiled binary addons (https://nodejs.org/api/addons.html)\n\nbuild/Release\n\n# Dependency directories\n\nnode_modules/\njspm_packages/\n\n# Snowpack dependency directory (https://snowpack.dev/)\n\nweb_modules/\n\n# TypeScript cache\n\n\\*.tsbuildinfo\n\n# Optional npm cache directory\n\n.npm\n\n# Optional eslint cache\n\n.eslintcache\n\n# Optional stylelint cache\n\n.stylelintcache\n\n# Microbundle cache\n\n.rpt2_cache/\n.rts2_cache_cjs/\n.rts2_cache_es/\n.rts2_cache_umd/\n\n# Optional REPL history\n\n.node_repl_history\n\n# Output of 'npm pack'\n\n\\*.tgz\n\n# Yarn Integrity file\n\n.yarn-integrity\n\n# dotenv environment variable files\n\n.env\n.env.development.local\n.env.test.local\n.env.production.local\n.env.local\n\n# parcel-bundler cache (https://parceljs.org/)\n\n.cache\n.parcel-cache\n\n# Next.js build output\n\n.next\nout\n\n# Nuxt.js build / generate output\n\n.nuxt\ndist\n\n# Gatsby files\n\n.cache/\n\n# Comment in the public line in if your project uses Gatsby and not Next.js\n\n# https://nextjs.org/blog/next-9-1#public-directory-support\n\n# public\n\n# vuepress build output\n\n.vuepress/dist\n\n# vuepress v2.x temp and cache directory\n\n.temp\n.cache\n\n# Docusaurus cache and 
generated files\n\n.docusaurus\n\n# Serverless directories\n\n.serverless/\n\n# FuseBox cache\n\n.fusebox/\n\n# DynamoDB Local files\n\n.dynamodb/\n\n# TernJS port file\n\n.tern-port\n\n# Stores VSCode versions used for testing VSCode extensions\n\n.vscode-test\n\n# yarn v2\n\n.yarn/cache\n.yarn/unplugged\n.yarn/build-state.yml\n.yarn/install-state.gz\n.pnp.\\*\n"
  },
  {
    "path": "comparisons/empty-ok-all/bun/index.ts",
    "content": "const server = Bun.serve({\n\tport: 8080,\n\tfetch(req) {\n\t\treturn new Response();\n\t},\n});\n\nconsole.log(`Listening on http://localhost:${server.port}...`);\n"
  },
  {
    "path": "comparisons/empty-ok-all/bun/package.json",
    "content": "{\n  \"name\": \"empty-ok-all\",\n  \"module\": \"index.ts\",\n  \"type\": \"module\",\n  \"devDependencies\": {\n    \"bun-types\": \"latest\"\n  },\n  \"peerDependencies\": {\n    \"typescript\": \"^5.0.0\"\n  }\n}\n"
  },
  {
    "path": "comparisons/empty-ok-all/bun/tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"lib\": [\"ESNext\"],\n    \"module\": \"esnext\",\n    \"target\": \"esnext\",\n    \"moduleResolution\": \"bundler\",\n    \"moduleDetection\": \"force\",\n    \"allowImportingTsExtensions\": true,\n    \"noEmit\": true,\n    \"composite\": true,\n    \"strict\": true,\n    \"downlevelIteration\": true,\n    \"skipLibCheck\": true,\n    \"jsx\": \"preserve\",\n    \"allowSyntheticDefaultImports\": true,\n    \"forceConsistentCasingInFileNames\": true,\n    \"allowJs\": true,\n    \"types\": [\n      \"bun-types\" // add Bun global\n    ]\n  }\n}\n"
  },
  {
    "path": "comparisons/empty-ok-all/go/main.go",
    "content": "package main\n\nimport (\n    \"net/http\"\n)\n\nfunc main() {\n    http.HandleFunc(\"/\", HelloServer)\n    http.ListenAndServe(\":8080\", nil)\n}\n\nfunc HelloServer(w http.ResponseWriter, r *http.Request) {}\n"
  },
  {
    "path": "comparisons/empty-ok-all/node/app.js",
    "content": "const http = require('http');\n\nconst hostname = '127.0.0.1';\nconst port = 8080;\n\nconst server = http.createServer((req, res) => {\n  res.statusCode = 200;\n  res.setHeader('Content-Type', 'text/plain');\n  res.end();\n});\n\nserver.listen(port, hostname, () => {\n  console.log(`Server running at http://${hostname}:${port}/`);\n});\n"
  },
  {
    "path": "comparisons/empty-ok-all/odin/main.odin",
    "content": "package empty_ok_all\n\nimport \"core:fmt\"\n\nimport http \"../../..\"\n\nmain :: proc() {\n\ts: http.Server\n\n\tfmt.println(\"Listening on http://localhost:8080...\")\n\n\thandler := http.handler(proc(_: ^http.Request, res: ^http.Response) {\n\t\tres.status = .OK\n\t\thttp.respond(res)\n\t})\n\n\thttp.listen_and_serve(&s, handler)\n}\n"
  },
  {
    "path": "comparisons/empty-ok-all/rust/.gitignore",
    "content": "target\n"
  },
  {
    "path": "comparisons/empty-ok-all/rust/Cargo.toml",
    "content": "[package]\nname = \"rust\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[dependencies]\nactix-web = \"4.2.1\"\nserde = { version = \"1.0.145\", features = [\"derive\"] }\n"
  },
  {
    "path": "comparisons/empty-ok-all/rust/src/main.rs",
    "content": "use actix_web::{web, App, HttpServer};\n\n#[actix_web::main]\nasync fn main() -> std::io::Result<()> {\n    HttpServer::new(|| {\n        App::new()\n            .service(web::resource(\"/\").to(|| async { \"\" }))\n    })\n    .bind((\"127.0.0.1\", 8080))?\n    .run()\n    .await\n}\n"
  },
  {
    "path": "cookie.odin",
    "content": "package http\n\nimport \"core:io\"\nimport \"core:strconv\"\nimport \"core:strings\"\nimport \"core:time\"\n\nCookie_Same_Site :: enum {\n\tUnspecified,\n\tNone,\n\tStrict,\n\tLax,\n}\n\nCookie :: struct {\n\t_raw:         string,\n\tname:         string,\n\tvalue:        string,\n\tdomain:       Maybe(string),\n\texpires_gmt:  Maybe(time.Time),\n\tmax_age_secs: Maybe(int),\n\tpath:         Maybe(string),\n\thttp_only:    bool,\n\tpartitioned:  bool,\n\tsecure:       bool,\n\tsame_site:    Cookie_Same_Site,\n}\n\n// Builds the Set-Cookie header string representation of the given cookie.\ncookie_write :: proc(w: io.Writer, c: Cookie) -> io.Error {\n\t// odinfmt:disable\n\tio.write_string(w, \"set-cookie: \") or_return\n\twrite_escaped_newlines(w, c.name)  or_return\n\tio.write_byte(w, '=')              or_return\n\twrite_escaped_newlines(w, c.value) or_return\n\n\tif d, ok := c.domain.(string); ok {\n\t\tio.write_string(w, \"; Domain=\") or_return\n\t\twrite_escaped_newlines(w, d)    or_return\n\t}\n\n\tif e, ok := c.expires_gmt.(time.Time); ok {\n\t\tio.write_string(w, \"; Expires=\") or_return\n\t\tdate_write(w, e)                 or_return\n\t}\n\n\tif a, ok := c.max_age_secs.(int); ok {\n\t\tio.write_string(w, \"; Max-Age=\") or_return\n\t\tio.write_int(w, a)               or_return\n\t}\n\n\tif p, ok := c.path.(string); ok {\n\t\tio.write_string(w, \"; Path=\") or_return\n\t\twrite_escaped_newlines(w, p)  or_return\n\t}\n\n\tswitch c.same_site {\n\tcase .None:   io.write_string(w, \"; SameSite=None\")   or_return\n\tcase .Lax:    io.write_string(w, \"; SameSite=Lax\")    or_return\n\tcase .Strict: io.write_string(w, \"; SameSite=Strict\") or_return\n\tcase .Unspecified: // no-op.\n\t}\n\t// odinfmt:enable\n\n\tif c.secure {\n\t\tio.write_string(w, \"; Secure\") or_return\n\t}\n\n\tif c.partitioned {\n\t\tio.write_string(w, \"; Partitioned\") or_return\n\t}\n\n\tif c.http_only {\n\t\tio.write_string(w, \"; HttpOnly\") or_return\n\t}\n\n\treturn 
nil\n}\n\n// Builds the Set-Cookie header string representation of the given cookie.\ncookie_string :: proc(c: Cookie, allocator := context.allocator) -> string {\n\tb: strings.Builder\n\tstrings.builder_init(&b, 0, 20, allocator)\n\n\tcookie_write(strings.to_writer(&b), c)\n\n\treturn strings.to_string(b)\n}\n\n// TODO: check specific whitespace requirements in RFC.\n//\n// Allocations are done to check case-insensitive attributes but they are deleted right after.\n// So, all the returned strings (inside cookie) are slices into the given value string.\ncookie_parse :: proc(value: string, allocator := context.allocator) -> (cookie: Cookie, ok: bool) {\n\tvalue := value\n\n\teq := strings.index_byte(value, '=')\n\tif eq < 1 { return }\n\n\tcookie._raw = value\n\tcookie.name = value[:eq]\n\tvalue = value[eq + 1:]\n\n\tsemi := strings.index_byte(value, ';')\n\tswitch semi {\n\tcase -1:\n\t\tcookie.value = value\n\t\tok = true\n\t\treturn\n\tcase 0:\n\t\treturn\n\tcase:\n\t\tcookie.value = value[:semi]\n\t\tvalue = value[semi + 1:]\n\t}\n\n\tparse_part :: proc(cookie: ^Cookie, part: string, allocator := context.temp_allocator) -> (ok: bool) {\n\t\teq := strings.index_byte(part, '=')\n\t\tswitch eq {\n\t\tcase -1:\n\t\t\tkey := strings.to_lower(part, allocator)\n\t\t\tdefer delete(key, allocator)\n\n\t\t\tswitch key {\n\t\t\tcase \"httponly\":\n\t\t\t\tcookie.http_only = true\n\t\t\tcase \"partitioned\":\n\t\t\t\tcookie.partitioned = true\n\t\t\tcase \"secure\":\n\t\t\t\tcookie.secure = true\n\t\t\tcase:\n\t\t\t\treturn\n\t\t\t}\n\t\tcase 0:\n\t\t\treturn\n\t\tcase:\n\t\t\tkey := strings.to_lower(part[:eq], allocator)\n\t\t\tdefer delete(key, allocator)\n\n\t\t\tvalue := part[eq + 1:]\n\n\t\t\tswitch key {\n\t\t\tcase \"domain\":\n\t\t\t\tcookie.domain = value\n\t\t\tcase \"expires\":\n\t\t\t\tcookie.expires_gmt = cookie_date_parse(value) or_return\n\t\t\tcase \"max-age\":\n\t\t\t\tcookie.max_age_secs = strconv.parse_int(value, 10) or_return\n\t\t\tcase 
\"path\":\n\t\t\t\tcookie.path = value\n\t\t\tcase \"samesite\":\n\t\t\t\tswitch value {\n\t\t\t\tcase \"lax\", \"Lax\", \"LAX\":\n\t\t\t\t\tcookie.same_site = .Lax\n\t\t\t\tcase \"none\", \"None\", \"NONE\":\n\t\t\t\t\tcookie.same_site = .None\n\t\t\t\tcase \"strict\", \"Strict\", \"STRICT\":\n\t\t\t\t\tcookie.same_site = .Strict\n\t\t\t\tcase:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tfor semi = strings.index_byte(value, ';'); semi != -1; semi = strings.index_byte(value, ';') {\n\t\tpart := strings.trim_left_space(value[:semi])\n\t\tvalue = value[semi + 1:]\n\t\tparse_part(&cookie, part, allocator) or_return\n\t}\n\n\tpart := strings.trim_left_space(value)\n\tif part == \"\" {\n\t\tok = true\n\t\treturn\n\t}\n\n\tparse_part(&cookie, part, allocator) or_return\n\tok = true\n\treturn\n}\n\n/*\nImplementation of the algorithm described in RFC 6265 section 5.1.1.\n*/\ncookie_date_parse :: proc(value: string) -> (t: time.Time, ok: bool) {\n\n\titer_delim :: proc(value: ^string) -> (token: string, ok: bool) {\n\t\tstart := -1\n\t\tstart_loop: for ch, i in transmute([]byte)value^ {\n\t\t\tswitch ch {\n\t\t\tcase 0x09, 0x20..=0x2F, 0x3B..=0x40, 0x5B..=0x60, 0x7B..=0x7E:\n\t\t\tcase:\n\t\t\t\tstart = i\n\t\t\t\tbreak start_loop\n\t\t\t}\n\t\t}\n\n\t\tif start == -1 {\n\t\t\treturn\n\t\t}\n\n\t\ttoken = value[start:]\n\t\tlength := len(token)\n\t\tend_loop: for ch, i in transmute([]byte)token {\n\t\t\tswitch ch {\n\t\t\tcase 0x09, 0x20..=0x2F, 0x3B..=0x40, 0x5B..=0x60, 0x7B..=0x7E:\n\t\t\t\tlength = i\n\t\t\t\tbreak end_loop\n\t\t\t}\n\t\t}\n\n\t\tok = true\n\n\t\ttoken  = token[:length]\n\t\tvalue^ = value[start+length:]\n\t\treturn\n\t}\n\n\tparse_digits :: proc(value: string, min, max: int, trailing_ok: bool) -> (int, bool) {\n\t\tcount: int\n\t\tfor ch in transmute([]byte)value {\n\t\t\tif ch <= 0x2f || ch >= 0x3a {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcount += 1\n\t\t}\n\n\t\tif count < min || count > max 
{\n\t\t\treturn 0, false\n\t\t}\n\n\t\tif !trailing_ok && len(value) != count {\n\t\t\treturn 0, false\n\t\t}\n\n\t\treturn strconv.parse_int(value[:count], 10)\n\t}\n\n\tparse_time :: proc(token: string) -> (t: Time, ok: bool) {\n\t\thours, match1, tail := strings.partition(token, \":\")\n\t\tif match1 != \":\" { return }\n\t\tminutes, match2, seconds := strings.partition(tail,  \":\")\n\t\tif match2 != \":\" { return }\n\n\t\tt.hours   = parse_digits(hours,   1, 2, false) or_return\n\t\tt.minutes = parse_digits(minutes, 1, 2, false) or_return\n\t\tt.seconds = parse_digits(seconds, 1, 2, true)  or_return\n\n\t\tok = true\n\t\treturn\n\t}\n\n\tparse_month :: proc(token: string) -> (month: int) {\n\t\tif len(token) < 3 {\n\t\t\treturn\n\t\t}\n\n\t\tlower: [3]byte\n\t\tfor &ch, i in lower {\n\t\t\t#no_bounds_check orig := token[i]\n\t\t\tswitch orig {\n\t\t\tcase 'A'..='Z':\n\t\t\t\tch = orig + 32\n\t\t\tcase:\n\t\t\t\tch = orig\n\t\t\t}\n\t\t}\n\n\t\tswitch string(lower[:]) {\n\t\tcase \"jan\":\n\t\t\treturn 1\n\t\tcase \"feb\":\n\t\t\treturn 2\n\t\tcase \"mar\":\n\t\t\treturn 3\n\t\tcase \"apr\":\n\t\t\treturn 4\n\t\tcase \"may\":\n\t\t\treturn 5\n\t\tcase \"jun\":\n\t\t\treturn 6\n\t\tcase \"jul\":\n\t\t\treturn 7\n\t\tcase \"aug\":\n\t\t\treturn 8\n\t\tcase \"sep\":\n\t\t\treturn 9\n\t\tcase \"oct\":\n\t\t\treturn 10\n\t\tcase \"nov\":\n\t\t\treturn 11\n\t\tcase \"dec\":\n\t\t\treturn 12\n\t\tcase:\n\t\t\treturn\n\t\t}\n\t}\n\n\tTime :: struct {\n\t\thours, minutes, seconds: int,\n\t}\n\n\tclock: Maybe(Time)\n\tday_of_month, month, year: Maybe(int)\n\n\tvalue := value\n\tfor token in iter_delim(&value) {\n\t\tif _, has_time := clock.?; !has_time {\n\t\t\tif t, tok := parse_time(token); tok {\n\t\t\t\tclock = t\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif _, has_day_of_month := day_of_month.?; !has_day_of_month {\n\t\t\tif dom, dok := parse_digits(token, 1, 2, true); dok {\n\t\t\t\tday_of_month = dom\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif _, has_month := 
month.?; !has_month {\n\t\t\tif mon := parse_month(token); mon > 0 {\n\t\t\t\tmonth = mon\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif _, has_year := year.?; !has_year {\n\t\t\tif yr, yrok := parse_digits(token, 2, 4, true); yrok {\n\n\t\t\t\tif yr >= 70 && yr <= 99 {\n\t\t\t\t\tyr += 1900\n\t\t\t\t} else if yr >= 0 && yr <= 69 {\n\t\t\t\t\tyr += 2000\n\t\t\t\t}\n\n\t\t\t\tyear = yr\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\n\tc := clock.? or_return\n\ty := year.?  or_return\n\n\tif y < 1601 {\n\t\treturn\n\t}\n\n\tt = time.datetime_to_time(\n\t\ty,\n\t\tmonth.?        or_return,\n\t\tday_of_month.? or_return,\n\t\tc.hours,\n\t\tc.minutes,\n\t\tc.seconds,\n\t) or_return\n\n\tok = true\n\treturn\n}\n\n/*\nRetrieves the cookie with the given `key` out of the requests `Cookie` header.\n\nIf the same key is in the header multiple times the last one is returned.\n*/\nrequest_cookie_get :: proc(r: ^Request, key: string) -> (value: string, ok: bool) {\n\tcookies := headers_get_unsafe(r.headers, \"cookie\") or_return\n\n\tfor k, v in request_cookies_iter(&cookies) {\n\t\tif key == k { return v, true }\n\t}\n\n\treturn\n}\n\n/*\nAllocates a map with the given allocator and puts all cookie pairs from the requests `Cookie` header into it.\n\nIf the same key is in the header multiple times the last one is returned.\n*/\nrequest_cookies :: proc(r: ^Request, allocator := context.temp_allocator) -> (res: map[string]string) {\n\tres.allocator = allocator\n\n\tcookies := headers_get_unsafe(r.headers, \"cookie\") or_else \"\"\n\tfor k, v in request_cookies_iter(&cookies) {\n\t\t// Don't overwrite, the iterator goes from right to left and we want the last.\n\t\tif k in res { continue }\n\n\t\tres[k] = v\n\t}\n\n\treturn\n}\n\n/*\nIterates the cookies from right to left.\n*/\nrequest_cookies_iter :: proc(cookies: ^string) -> (key: string, value: string, ok: bool) {\n\tend := len(cookies)\n\teq  := -1\n\tfor i := end-1; i >= 0; i-=1 {\n\t\tb := cookies[i]\n\t\tstart := i == 0\n\t\tsep := 
start || b == ' ' && cookies[i-1] == ';'\n\t\tif sep {\n\t\t\tdefer end = i - 1\n\n\t\t\t// Invalid.\n\t\t\tif eq < 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\toff := 0 if start else 1\n\n\t\t\tkey   = cookies[i+off:eq]\n\t\t\tvalue = cookies[eq+1:end]\n\n\t\t\tcookies^ = cookies[:i-off]\n\n\t\t\treturn key, value, true\n\t\t} else if b == '=' {\n\t\t\teq = i\n\t\t}\n\t}\n\n\treturn\n}\n"
  },
  {
    "path": "docs/all.odin",
    "content": "/*\nThis file simply imports any packages we want in the documentation.\n*/\npackage docs\n\nimport \"../client\"\nimport http \"..\"\nimport \"../openssl\"\n\n_ :: client\n_ :: http\n_ :: openssl\n"
  },
  {
    "path": "docs/generate.sh",
    "content": "#!/usr/bin/env bash\n\nset -ex\n\ncd docs\n\nrm -rf build\nmkdir build\n\nodin doc . -all-packages -doc-format\n\ncd build\n\n# This is the binary of https://github.com/laytan/pkg.odin-lang.org, built by `odin built . -out:odin-doc`\nodin-doc ../docs.odin-doc ../odin-doc.json\n\n# For GitHub pages, a CNAME file with the intended domain is required.\necho \"odin-http.laytan.dev\" > CNAME\n\ncd ..\n\nrm docs.odin-doc\n\ncd ..\n"
  },
  {
    "path": "docs/odin-doc.json",
    "content": "{\n    \"hide_core\": true,\n\t\"hide_base\": true,\n    \"collections\": {\n        \"odin-http\": {\n            \"name\": \"http\",\n            \"source_url\": \"https://github.com/laytan/odin-http/blob/main\",\n            \"base_url\": \"/http\",\n            \"root_path\": \"../..\",\n            \"license\": {\n                \"text\": \"MIT\",\n                \"url\": \"https://github.com/laytan/odin-http/tree/main/LICENSE\"\n            },\n            \"home\": {\n                \"title\": \"Odin-HTTP\",\n                \"embed_readme\": \"../../README.md\"\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "examples/client/main.odin",
    "content": "package client_example\n\nimport \"core:fmt\"\n\nimport \"../../client\"\n\nmain :: proc() {\n\tget()\n\tpost()\n}\n\n// basic get request.\nget :: proc() {\n\tres, err := client.get(\"https://www.google.com/\")\n\tif err != nil {\n\t\tfmt.printf(\"Request failed: %s\", err)\n\t\treturn\n\t}\n\tdefer client.response_destroy(&res)\n\n\tfmt.printf(\"Status: %s\\n\", res.status)\n\tfmt.printf(\"Headers: %v\\n\", res.headers)\n\tfmt.printf(\"Cookies: %v\\n\", res.cookies)\n\tbody, allocation, berr := client.response_body(&res)\n\tif berr != nil {\n\t\tfmt.printf(\"Error retrieving response body: %s\", berr)\n\t\treturn\n\t}\n\tdefer client.body_destroy(body, allocation)\n\n\tfmt.println(body)\n}\n\nPost_Body :: struct {\n\tname:    string,\n\tmessage: string,\n}\n\n// POST request with JSON.\npost :: proc() {\n\treq: client.Request\n\tclient.request_init(&req, .Post)\n\tdefer client.request_destroy(&req)\n\n\tpbody := Post_Body{\"Laytan\", \"Hello, World!\"}\n\tif err := client.with_json(&req, pbody); err != nil {\n\t\tfmt.printf(\"JSON error: %s\", err)\n\t\treturn\n\t}\n\n\tres, err := client.request(&req, \"https://webhook.site/YOUR-ID-HERE\")\n\tif err != nil {\n\t\tfmt.printf(\"Request failed: %s\", err)\n\t\treturn\n\t}\n\tdefer client.response_destroy(&res)\n\n\tfmt.printf(\"Status: %s\\n\", res.status)\n\tfmt.printf(\"Headers: %v\\n\", res.headers)\n\tfmt.printf(\"Cookies: %v\\n\", res.cookies)\n\n\tbody, allocation, berr := client.response_body(&res)\n\tif berr != nil {\n\t\tfmt.printf(\"Error retrieving response body: %s\", berr)\n\t\treturn\n\t}\n\tdefer client.body_destroy(body, allocation)\n\n\tfmt.println(body)\n}\n"
  },
  {
    "path": "examples/tcp_echo/main.odin",
    "content": "package example_tcp_echo\n\nimport \"core:fmt\"\nimport \"core:net\"\nimport \"core:os\"\n\nimport nbio \"../../nbio/poly\"\n\nEcho_Server :: struct {\n\tio:          nbio.IO,\n\tsock:        net.TCP_Socket,\n\tconnections: [dynamic]^Echo_Connection,\n}\n\nEcho_Connection :: struct {\n\tserver:  ^Echo_Server,\n\tsock:    net.TCP_Socket,\n\tbuf:     [50]byte,\n}\n\nmain :: proc() {\n\tserver: Echo_Server\n\tdefer delete(server.connections)\n\n\tnbio.init(&server.io)\n\tdefer nbio.destroy(&server.io)\n\n\tsock, err := nbio.open_and_listen_tcp(&server.io, {net.IP4_Loopback, 8080})\n\tfmt.assertf(err == nil, \"Error opening and listening on localhost:8080: %v\", err)\n\tserver.sock = sock\n\n\tnbio.accept(&server.io, sock, &server, echo_on_accept)\n\n\t// Start the event loop.\n\terrno: os.Errno\n\tfor errno == os.ERROR_NONE {\n\t\terrno = nbio.tick(&server.io)\n\t}\n\n\tfmt.assertf(errno == os.ERROR_NONE, \"Server stopped with error code: %v\", errno)\n}\n\necho_on_accept :: proc(server: ^Echo_Server, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {\n\tfmt.assertf(err == nil, \"Error accepting a connection: %v\", err)\n\n\t// Register a new accept for the next client.\n\tnbio.accept(&server.io, server.sock, server, echo_on_accept)\n\n\tc := new(Echo_Connection)\n\tc.server = server\n\tc.sock   = client\n\tappend(&server.connections, c)\n\n\tnbio.recv(&server.io, client, c.buf[:], c, echo_on_recv)\n}\n\necho_on_recv :: proc(c: ^Echo_Connection, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {\n\tfmt.assertf(err == nil, \"Error receiving from client: %v\", err)\n\n\tnbio.send_all(&c.server.io, c.sock, c.buf[:received], c, echo_on_sent)\n}\n\necho_on_sent :: proc(c: ^Echo_Connection, sent: int, err: net.Network_Error) {\n\tfmt.assertf(err == nil, \"Error sending to client: %v\", err)\n\n\t// Accept the next message, to then ultimately echo back again.\n\tnbio.recv(&c.server.io, c.sock, c.buf[:], c, echo_on_recv)\n}\n"
  },
  {
    "path": "handlers.odin",
    "content": "package http\n\nimport \"core:net\"\nimport \"core:strconv\"\nimport \"core:sync\"\nimport \"core:time\"\n\nHandler_Proc :: proc(handler: ^Handler, req: ^Request, res: ^Response)\nHandle_Proc :: proc(req: ^Request, res: ^Response)\n\nHandler :: struct {\n\tuser_data: rawptr,\n\tnext:      Maybe(^Handler),\n\thandle:    Handler_Proc,\n}\n\n// TODO: something like http.handler_with_body which gets the body before calling the handler.\n\nhandler :: proc(handle: Handle_Proc) -> Handler {\n\th: Handler\n\th.user_data = rawptr(handle)\n\n\thandle := proc(h: ^Handler, req: ^Request, res: ^Response) {\n\t\tp := (Handle_Proc)(h.user_data)\n\t\tp(req, res)\n\t}\n\n\th.handle = handle\n\treturn h\n}\n\nmiddleware_proc :: proc(next: Maybe(^Handler), handle: Handler_Proc) -> Handler {\n\th: Handler\n\th.next = next\n\th.handle = handle\n\treturn h\n}\n\nRate_Limit_On_Limit :: struct {\n\tuser_data: rawptr,\n\ton_limit:  proc(req: ^Request, res: ^Response, user_data: rawptr),\n}\n\n// Convenience method to create a Rate_Limit_On_Limit that writes the given message.\nrate_limit_message :: proc(message: ^string) -> Rate_Limit_On_Limit {\n\treturn Rate_Limit_On_Limit{user_data = message, on_limit = proc(_: ^Request, res: ^Response, user_data: rawptr) {\n\t\tmessage := (^string)(user_data)\n\t\tbody_set(res, message^)\n\t\trespond(res)\n\t}}\n}\n\nRate_Limit_Opts :: struct {\n\twindow:   time.Duration,\n\tmax:      int,\n\n\t// Optional handler to call when a request is being rate-limited, allows you to customize the response.\n\ton_limit: Maybe(Rate_Limit_On_Limit),\n}\n\nRate_Limit_Data :: struct {\n\topts:       ^Rate_Limit_Opts,\n\tnext_sweep: time.Time,\n\thits:       map[net.Address]int,\n\tmu:         sync.Mutex,\n}\n\nrate_limit_destroy :: proc(data: ^Rate_Limit_Data) {\n\tsync.guard(&data.mu)\n\tdelete(data.hits)\n}\n\n// Basic rate limit based on IP address.\nrate_limit :: proc(data: ^Rate_Limit_Data, next: ^Handler, opts: ^Rate_Limit_Opts, allocator := 
context.allocator) -> Handler {\n\tassert(next != nil)\n\n\th: Handler\n\th.next = next\n\n\tdata.opts = opts\n\tdata.hits = make(map[net.Address]int, 16, allocator)\n\tdata.next_sweep = time.time_add(time.now(), opts.window)\n\th.user_data = data\n\n\th.handle = proc(h: ^Handler, req: ^Request, res: ^Response) {\n\t\tdata := (^Rate_Limit_Data)(h.user_data)\n\n\t\tsync.lock(&data.mu)\n\n\t\t// PERF: if this is not performing, we could run a thread that sweeps on a regular basis.\n\t\tif time.since(data.next_sweep) > 0 {\n\t\t\tclear(&data.hits)\n\t\t\tdata.next_sweep = time.time_add(time.now(), data.opts.window)\n\t\t}\n\n\t\thits := data.hits[req.client.address]\n\t\tdata.hits[req.client.address] = hits + 1\n\t\tsync.unlock(&data.mu)\n\n\t\tif hits > data.opts.max {\n\t\t\tres.status = .Too_Many_Requests\n\n\t\t\tretry_dur := i64(time.diff(time.now(), data.next_sweep) / time.Second)\n\t\t\tbuf := make([]byte, 32, context.temp_allocator)\n\t\t\tretry_str := strconv.write_int(buf, retry_dur, 10)\n\t\t\theaders_set_unsafe(&res.headers, \"retry-after\", retry_str)\n\n\t\t\tif on, ok := data.opts.on_limit.(Rate_Limit_On_Limit); ok {\n\t\t\t\ton.on_limit(req, res, on.user_data)\n\t\t\t} else {\n\t\t\t\trespond(res)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tnext := h.next.(^Handler)\n\t\tnext.handle(next, req, res)\n\t}\n\n\treturn h\n}\n"
  },
  {
    "path": "headers.odin",
    "content": "package http\n\nimport \"core:strings\"\n\n// A case-insensitive ASCII map for storing headers.\nHeaders :: struct {\n\t_kv:      map[string]string,\n\treadonly: bool,\n}\n\nheaders_init :: proc(h: ^Headers, allocator := context.temp_allocator) {\n\th._kv.allocator = allocator\n}\n\nheaders_count :: #force_inline proc(h: Headers) -> int {\n\treturn len(h._kv)\n}\n\n/*\nSets a header, given key is first sanitized, final (sanitized) key is returned.\n*/\nheaders_set :: proc(h: ^Headers, k: string, v: string, loc := #caller_location) -> string {\n\tif h.readonly {\n\t\tpanic(\"these headers are readonly, did you accidentally try to set a header on the request?\", loc)\n\t}\n\n\tl := sanitize_key(h^, k)\n\th._kv[l] = v\n\treturn l\n}\n\n/*\nUnsafely set header, given key is assumed to be a lowercase string and to be without newlines.\n*/\nheaders_set_unsafe :: #force_inline proc(h: ^Headers, k: string, v: string, loc := #caller_location) {\n\tassert(!h.readonly, \"these headers are readonly, did you accidentally try to set a header on the request?\", loc)\n\th._kv[k] = v\n}\n\nheaders_get :: proc(h: Headers, k: string) -> (string, bool) #optional_ok {\n\treturn h._kv[sanitize_key(h, k)]\n}\n\n/*\nUnsafely get header, given key is assumed to be a lowercase string.\n*/\nheaders_get_unsafe :: #force_inline proc(h: Headers, k: string) -> (string, bool) #optional_ok {\n\treturn h._kv[k]\n}\n\nheaders_entry :: proc(h: ^Headers, k: string, loc := #caller_location) -> (key_ptr: ^string, value_ptr: ^string, just_inserted: bool) {\n\tassert(!h.readonly, \"these headers are readonly, did you accidentally try to set a header on the request?\", loc)\n\tkey_ptr, value_ptr, just_inserted, _ = map_entry(&h._kv, sanitize_key(h^, k))\n\treturn\n}\n\nheaders_entry_unsafe :: #force_inline proc(h: ^Headers, k: string, loc := #caller_location) -> (key_ptr: ^string, value_ptr: ^string, just_inserted: bool) {\n\tassert(!h.readonly, \"these headers are readonly, did you 
accidentally try to set a header on the request?\", loc)\n\tkey_ptr, value_ptr, just_inserted, _ = map_entry(&h._kv, k)\n\treturn\n}\n\nheaders_has :: proc(h: Headers, k: string) -> bool {\n\treturn sanitize_key(h, k) in h._kv\n}\n\n/*\nUnsafely check for a header, given key is assumed to be a lowercase string.\n*/\nheaders_has_unsafe :: #force_inline proc(h: Headers, k: string) -> bool {\n\treturn k in h._kv\n}\n\nheaders_delete :: proc(h: ^Headers, k: string) -> (deleted_key: string, deleted_value: string) {\n\treturn delete_key(&h._kv, sanitize_key(h^, k))\n}\n\n/*\nUnsafely delete a header, given key is assumed to be a lowercase string.\n*/\nheaders_delete_unsafe :: #force_inline proc(h: ^Headers, k: string) {\n\tdelete_key(&h._kv, k)\n}\n\n/* Common Helpers */\n\nheaders_set_content_type :: proc {\n\theaders_set_content_type_mime,\n\theaders_set_content_type_string,\n}\n\nheaders_set_content_type_string :: #force_inline proc(h: ^Headers, ct: string) {\n\theaders_set_unsafe(h, \"content-type\", ct)\n}\n\nheaders_set_content_type_mime :: #force_inline proc(h: ^Headers, ct: Mime_Type) {\n\theaders_set_unsafe(h, \"content-type\", mime_to_content_type(ct))\n}\n\nheaders_set_close :: #force_inline proc(h: ^Headers) {\n\theaders_set_unsafe(h, \"connection\", \"close\")\n}\n\n/*\nEscapes any newlines and converts ASCII to lowercase.\n*/\n@(private=\"package\")\nsanitize_key :: proc(h: Headers, k: string) -> string {\n\tallocator := h._kv.allocator if h._kv.allocator.procedure != nil else context.temp_allocator\n\n\t// general +4 in rare case of newlines, so we might not need to reallocate.\n\tb := strings.builder_make(0, len(k)+4, allocator)\n\tfor c in k {\n\t\tswitch c {\n\t\tcase 'A'..='Z': strings.write_rune(&b, c + 32)\n\t\tcase '\\n':      strings.write_string(&b, \"\\\\n\")\n\t\tcase:           strings.write_rune(&b, c)\n\t\t}\n\t}\n\treturn strings.to_string(b)\n\n\t// NOTE: implementation that only allocates if needed, but we use arena's anyway so just 
allocating\n\t// some space should be about as fast?\n\t//\n\t// b: strings.Builder = ---\n\t// i: int\n\t// for c in v {\n\t// \tif c == '\\n' || (c >= 'A' && c <= 'Z') {\n\t// \t\tb = strings.builder_make(0, len(v)+4, allocator)\n\t// \t\tstrings.write_string(&b, v[:i])\n\t// \t\talloc = true\n\t// \t\tbreak\n\t// \t}\n\t// \ti+=1\n\t// }\n\t//\n\t// if !alloc {\n\t// \treturn v, false\n\t// }\n\t//\n\t// for c in v[i:] {\n\t//  switch c {\n\t//  case 'A'..='Z': strings.write_rune(&b, c + 32)\n\t//  case '\\n':      strings.write_string(&b, \"\\\\n\")\n\t//  case:           strings.write_rune(&b, c)\n\t//  }\n\t// }\n\t//\n\t// return strings.to_string(b), true\n}\n"
  },
  {
    "path": "http.odin",
    "content": "package http\n\nimport \"base:runtime\"\n\nimport \"core:io\"\nimport \"core:slice\"\nimport \"core:strconv\"\nimport \"core:strings\"\nimport \"core:sync\"\nimport \"core:time\"\n\nRequestline_Error :: enum {\n\tNone,\n\tMethod_Not_Implemented,\n\tNot_Enough_Fields,\n\tInvalid_Version_Format,\n}\n\nRequestline :: struct {\n\tmethod:  Method,\n\ttarget:  union {\n\t\tstring,\n\t\tURL,\n\t},\n\tversion: Version,\n}\n\n// A request-line begins with a method token, followed by a single space\n// (SP), the request-target, another single space (SP), the protocol\n// version, and ends with CRLF.\n//\n// This allocates a clone of the target, because this is intended to be used with a scanner,\n// which has a buffer that changes every read.\nrequestline_parse :: proc(s: string, allocator := context.temp_allocator) -> (line: Requestline, err: Requestline_Error) {\n\ts := s\n\n\tnext_space := strings.index_byte(s, ' ')\n\tif next_space == -1 { return line, .Not_Enough_Fields }\n\n\tok: bool\n\tline.method, ok = method_parse(s[:next_space])\n\tif !ok { return line, .Method_Not_Implemented }\n\ts = s[next_space + 1:]\n\n\tnext_space = strings.index_byte(s, ' ')\n\tif next_space == -1 { return line, .Not_Enough_Fields }\n\n\tline.target = strings.clone(s[:next_space], allocator)\n\ts = s[len(line.target.(string)) + 1:]\n\n\tline.version, ok = version_parse(s)\n\tif !ok { return line, .Invalid_Version_Format }\n\n\treturn\n}\n\nrequestline_write :: proc(w: io.Writer, rline: Requestline) -> io.Error {\n\t// odinfmt:disable\n\tio.write_string(w, method_string(rline.method)) or_return // <METHOD>\n\tio.write_byte(w, ' ')                           or_return // <METHOD> <SP>\n\n\tswitch t in rline.target {\n\tcase string: io.write_string(w, t)              or_return // <METHOD> <SP> <TARGET>\n\tcase URL:    request_path_write(w, t)           or_return // <METHOD> <SP> <TARGET>\n\t}\n\n\tio.write_byte(w, ' ')                           or_return // <METHOD> <SP> 
<TARGET> <SP>\n\tversion_write(w, rline.version)                 or_return // <METHOD> <SP> <TARGET> <SP> <VERSION>\n\tio.write_string(w, \"\\r\\n\")                      or_return // <METHOD> <SP> <TARGET> <SP> <VERSION> <CRLF>\n\t// odinfmt:enable\n\n\treturn nil\n}\n\nVersion :: struct {\n\tmajor: u8,\n\tminor: u8,\n}\n\n// Parses an HTTP version string according to RFC 7230, section 2.6.\nversion_parse :: proc(s: string) -> (version: Version, ok: bool) {\n\tswitch len(s) {\n\tcase 8:\n\t\t(s[6] == '.') or_return\n\t\tversion.minor = u8(int(s[7]) - '0')\n\t\tfallthrough\n\tcase 6:\n\t\t(s[:5] == \"HTTP/\") or_return\n\t\tversion.major = u8(int(s[5]) - '0')\n\tcase:\n\t\treturn\n\t}\n\tok = true\n\treturn\n}\n\nversion_write :: proc(w: io.Writer, v: Version) -> io.Error {\n\tio.write_string(w, \"HTTP/\") or_return\n\tio.write_rune(w, '0' + rune(v.major)) or_return\n\tif v.minor > 0 {\n\t\tio.write_rune(w, '.')\n\t\tio.write_rune(w, '0' + rune(v.minor))\n\t}\n\n\treturn nil\n}\n\nversion_string :: proc(v: Version, allocator := context.allocator) -> string {\n\tbuf := make([]byte, 8, allocator)\n\n\tb: strings.Builder\n\tb.buf = slice.into_dynamic(buf)\n\n\tversion_write(strings.to_writer(&b), v)\n\n\treturn strings.to_string(b)\n}\n\nMethod :: enum {\n\tGet,\n\tPost,\n\tDelete,\n\tPatch,\n\tPut,\n\tHead,\n\tConnect,\n\tOptions,\n\tTrace,\n}\n\n_method_strings := [?]string{\"GET\", \"POST\", \"DELETE\", \"PATCH\", \"PUT\", \"HEAD\", \"CONNECT\", \"OPTIONS\", \"TRACE\"}\n\nmethod_string :: proc(m: Method) -> string #no_bounds_check {\n\tif m < .Get || m > .Trace { return \"\" }\n\treturn _method_strings[m]\n}\n\nmethod_parse :: proc(m: string) -> (method: Method, ok: bool) #no_bounds_check {\n\t// PERF: I assume this is faster than a map with this amount of items.\n\n\tfor r in Method {\n\t\tif _method_strings[r] == m {\n\t\t\treturn r, true\n\t\t}\n\t}\n\n\treturn nil, false\n}\n\n// Parses the header and adds it to the headers if valid. 
The given string is copied.\nheader_parse :: proc(headers: ^Headers, line: string, allocator := context.temp_allocator) -> (key: string, ok: bool) {\n\t// Preceding spaces should not be allowed.\n\t(len(line) > 0 && line[0] != ' ') or_return\n\n\tcolon := strings.index_byte(line, ':')\n\t(colon > 0) or_return\n\n\t// There must not be a space before the colon.\n\t(line[colon - 1] != ' ') or_return\n\n\t// TODO/PERF: only actually relevant/needed if the key is one of these.\n\thas_host   := headers_has_unsafe(headers^, \"host\")\n\tcl, has_cl := headers_get_unsafe(headers^, \"content-length\")\n\n\tvalue := strings.trim_space(line[colon + 1:])\n\ttmp_key := sanitize_key(headers^, line[:colon])\n\tdefer if !ok { delete(tmp_key, allocator) }\n\n\t// RFC 7230 5.4: Server MUST respond with 400 to any request\n\t// with multiple \"Host\" header fields.\n\tif tmp_key == \"host\" && has_host {\n\t\treturn\n\t}\n\n\t// RFC 7230 3.3.3: If a message is received without Transfer-Encoding and with\n\t// either multiple Content-Length header fields having differing\n\t// field-values or a single Content-Length header field having an\n\t// invalid value, then the message framing is invalid and the\n\t// recipient MUST treat it as an unrecoverable error.\n\tif tmp_key == \"content-length\" && has_cl && cl != value {\n\t\treturn\n\t}\n\n\t// RFC 9110 5.3: A recipient MAY combine multiple field lines within a field section\n\t// that have the same field name into one field line, without changing\n\t// the semantics of the message, by appending each subsequent field line\n\t// value to the initial field line value in order, separated by a comma\n\t// (\",\") and optional whitespace (OWS, defined in Section 5.6.3). 
For\n\t// consistency, use comma SP.\n\tkey_ptr, value_ptr, just_inserted := headers_entry_unsafe(headers, tmp_key)\n\tif just_inserted {\n\t\tvalue = strings.clone(value, allocator)\n\t} else {\n\t\tvalue = strings.concatenate({value_ptr^, \", \", value}, allocator)\n\t\tdelete(tmp_key, allocator)\n\t\tdelete(value_ptr^, allocator)\n\t}\n\tkey = key_ptr^\n\tvalue_ptr^ = value\n\n\tok = true\n\treturn\n}\n\n// Returns if this is a valid trailer header.\n//\n// RFC 7230 4.1.2:\n// A sender MUST NOT generate a trailer that contains a field necessary\n// for message framing (e.g., Transfer-Encoding and Content-Length),\n// routing (e.g., Host), request modifiers (e.g., controls and\n// conditionals in Section 5 of [RFC7231]), authentication (e.g., see\n// [RFC7235] and [RFC6265]), response control data (e.g., see Section\n// 7.1 of [RFC7231]), or determining how to process the payload (e.g.,\n// Content-Encoding, Content-Type, Content-Range, and Trailer).\nheader_allowed_trailer :: proc(key: string) -> bool {\n\t// odinfmt:disable\n\treturn (\n\t\t// Message framing:\n\t\tkey != \"transfer-encoding\" &&\n\t\tkey != \"content-length\" &&\n\t\t// Routing:\n\t\tkey != \"host\" &&\n\t\t// Request modifiers:\n\t\tkey != \"if-match\" &&\n\t\tkey != \"if-none-match\" &&\n\t\tkey != \"if-modified-since\" &&\n\t\tkey != \"if-unmodified-since\" &&\n\t\tkey != \"if-range\" &&\n\t\t// Authentication:\n\t\tkey != \"www-authenticate\" &&\n\t\tkey != \"authorization\" &&\n\t\tkey != \"proxy-authenticate\" &&\n\t\tkey != \"proxy-authorization\" &&\n\t\tkey != \"cookie\" &&\n\t\tkey != \"set-cookie\" &&\n\t\t// Control data:\n\t\tkey != \"age\" &&\n\t\tkey != \"cache-control\" &&\n\t\tkey != \"expires\" &&\n\t\tkey != \"date\" &&\n\t\tkey != \"location\" &&\n\t\tkey != \"retry-after\" &&\n\t\tkey != \"vary\" &&\n\t\tkey != \"warning\" &&\n\t\t// How to process:\n\t\tkey != \"content-encoding\" &&\n\t\tkey != \"content-type\" &&\n\t\tkey != \"content-range\" &&\n\t\tkey != 
\"trailer\")\n\t// odinfmt:enable\n}\n\n@(private)\nDATE_LENGTH :: len(\"Fri, 05 Feb 2023 09:01:10 GMT\")\n\n// Formats a time in the HTTP header format (no timezone conversion is done, GMT expected):\n// `<day-name>, <day> <month> <year> <hour>:<minute>:<second> GMT`\ndate_write :: proc(w: io.Writer, t: time.Time) -> io.Error {\n\tyear, month, day := time.date(t)\n\thour, minute, second := time.clock_from_time(t)\n\twday := time.weekday(t)\n\n\t// odinfmt:disable\n\tio.write_string(w, DAYS[wday])    or_return // 'Fri, '\n\twrite_padded_int(w, day)          or_return // 'Fri, 05'\n\tio.write_string(w, MONTHS[month]) or_return // 'Fri, 05 Feb '\n\tio.write_int(w, year)             or_return // 'Fri, 05 Feb 2023'\n\tio.write_byte(w, ' ')             or_return // 'Fri, 05 Feb 2023 '\n\twrite_padded_int(w, hour)         or_return // 'Fri, 05 Feb 2023 09'\n\tio.write_byte(w, ':')             or_return // 'Fri, 05 Feb 2023 09:'\n\twrite_padded_int(w, minute)       or_return // 'Fri, 05 Feb 2023 09:01'\n\tio.write_byte(w, ':')             or_return // 'Fri, 05 Feb 2023 09:01:'\n\twrite_padded_int(w, second)       or_return // 'Fri, 05 Feb 2023 09:01:10'\n\tio.write_string(w, \" GMT\")        or_return // 'Fri, 05 Feb 2023 09:01:10 GMT'\n\t// odinfmt:enable\n\n\treturn nil\n}\n\n// Formats a time in the HTTP header format (no timezone conversion is done, GMT expected):\n// `<day-name>, <day> <month> <year> <hour>:<minute>:<second> GMT`\ndate_string :: proc(t: time.Time, allocator := context.allocator) -> string {\n\tb: strings.Builder\n\n\tbuf := make([]byte, DATE_LENGTH, allocator)\n\tb.buf = slice.into_dynamic(buf)\n\n\tdate_write(strings.to_writer(&b), t)\n\n\treturn strings.to_string(b)\n}\n\ndate_parse :: proc(value: string) -> (t: time.Time, ok: bool) #no_bounds_check {\n\tif len(value) != DATE_LENGTH { return }\n\n\t// Remove 'Fri, '\n\tvalue := value\n\tvalue = value[5:]\n\n\t// Parse '05'\n\tday := strconv.parse_i64_of_base(value[:2], 10) or_return\n\tvalue = 
value[2:]\n\n\t// Parse ' Feb ' or '-Feb-' (latter is a deprecated format but should still be parsed).\n\tmonth_index := -1\n\tmonth_str := value[1:4]\n\tvalue = value[5:]\n\tfor month, i in MONTHS[1:] {\n\t\tif month_str == month[1:4] {\n\t\t\tmonth_index = i\n\t\t\tbreak\n\t\t}\n\t}\n\tmonth_index += 1\n\tif month_index <= 0 { return }\n\n\tyear := strconv.parse_i64_of_base(value[:4], 10) or_return\n\tvalue = value[4:]\n\n\thour := strconv.parse_i64_of_base(value[1:3], 10) or_return\n\tvalue = value[4:]\n\n\tminute := strconv.parse_i64_of_base(value[:2], 10) or_return\n\tvalue = value[3:]\n\n\tseconds := strconv.parse_i64_of_base(value[:2], 10) or_return\n\tvalue = value[3:]\n\n\t// Should have only 'GMT' left now.\n\tif value != \"GMT\" { return }\n\n\tt = time.datetime_to_time(int(year), int(month_index), int(day), int(hour), int(minute), int(seconds)) or_return\n\tok = true\n\treturn\n}\n\nrequest_path_write :: proc(w: io.Writer, target: URL) -> io.Error {\n\t// TODO: maybe net.percent_encode.\n\n\tif target.path == \"\" {\n\t\tio.write_byte(w, '/') or_return\n\t} else {\n\t\tio.write_string(w, target.path) or_return\n\t}\n\n\tif len(target.query) > 0 {\n\t\tio.write_byte(w, '?') or_return\n\t\tio.write_string(w, target.query) or_return\n\t}\n\n\treturn nil\n}\n\nrequest_path :: proc(target: URL, allocator := context.allocator) -> (rq_path: string) {\n\tres := strings.builder_make(0, len(target.path), allocator)\n\trequest_path_write(strings.to_writer(&res), target)\n\treturn strings.to_string(res)\n}\n\n_dynamic_unwritten :: proc(d: [dynamic]$E) -> []E  {\n\treturn (cast([^]E)raw_data(d))[len(d):cap(d)]\n}\n\n_dynamic_add_len :: proc(d: ^[dynamic]$E, len: int) {\n\t(transmute(^runtime.Raw_Dynamic_Array)d).len += len\n}\n\n@(private)\nwrite_padded_int :: proc(w: io.Writer, i: int) -> io.Error {\n\tif i < 10 {\n\t\tio.write_string(w, PADDED_NUMS[i]) or_return\n\t\treturn nil\n\t}\n\n\t_, err := io.write_int(w, i)\n\treturn 
err\n}\n\n@(private)\nwrite_escaped_newlines :: proc(w: io.Writer, v: string) -> io.Error {\n\tfor c in v {\n\t\tif c == '\\n' {\n\t\t\tio.write_string(w, \"\\\\n\") or_return\n\t\t} else {\n\t\t\tio.write_rune(w, c) or_return\n\t\t}\n\t}\n\treturn nil\n}\n\n@(private)\nPADDED_NUMS := [10]string{\"00\", \"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\"}\n\n@(private)\nDAYS := [7]string{\"Sun, \", \"Mon, \", \"Tue, \", \"Wed, \", \"Thu, \", \"Fri, \", \"Sat, \"}\n\n@(private)\nMONTHS := [13]string {\n\t\" \", // Jan is 1, so 0 should never be accessed.\n\t\" Jan \",\n\t\" Feb \",\n\t\" Mar \",\n\t\" Apr \",\n\t\" May \",\n\t\" Jun \",\n\t\" Jul \",\n\t\" Aug \",\n\t\" Sep \",\n\t\" Oct \",\n\t\" Nov \",\n\t\" Dec \",\n}\n\n@(private)\nAtomic :: struct($T: typeid) {\n\traw: T,\n}\n\n@(private)\natomic_store :: #force_inline proc(a: ^Atomic($T), val: T) {\n\tsync.atomic_store(&a.raw, val)\n}\n\n@(private)\natomic_load :: #force_inline proc(a: ^Atomic($T)) -> T {\n\treturn sync.atomic_load(&a.raw)\n}\n\nimport \"core:testing\"\n\n@(test)\ntest_dynamic_unwritten :: proc(t: ^testing.T) {\n\t{\n\t\td  := make([dynamic]int, 4, 8)\n\t\tdu := _dynamic_unwritten(d)\n\n\t\ttesting.expect(t, len(du) == 4)\n\t}\n\n\t{\n\t\td := slice.into_dynamic([]int{1, 2, 3, 4, 5})\n\t\t_dynamic_add_len(&d, 3)\n\t\tdu := _dynamic_unwritten(d)\n\n\t\ttesting.expect(t, len(d)  == 3)\n\t\ttesting.expect(t, len(du) == 2)\n\t\ttesting.expect(t, du[0] == 4)\n\t\ttesting.expect(t, du[1] == 5)\n\t}\n\n\t{\n\t\td := slice.into_dynamic([]int{})\n\t\tdu := _dynamic_unwritten(d)\n\n\t\ttesting.expect(t, len(du) == 0)\n\t}\n}\n"
  },
  {
    "path": "mimes.odin",
    "content": "package http\n\nimport \"core:path/filepath\"\n\nMime_Type :: enum {\n\tPlain,\n\n\tCss,\n\tCsv,\n\tGif,\n\tHtml,\n\tIco,\n\tJpeg,\n\tJs,\n\tJson,\n\tPng,\n\tSvg,\n\tUrl_Encoded,\n\tXml,\n\tZip,\n\tWasm,\n}\n\nmime_from_extension :: proc(s: string) -> Mime_Type {\n\t//odinfmt:disable\n\tswitch filepath.ext(s) {\n\tcase \".html\": return .Html\n\tcase \".js\":   return .Js\n\tcase \".css\":  return .Css\n\tcase \".csv\":  return .Csv\n\tcase \".xml\":  return .Xml\n\tcase \".zip\":  return .Zip\n\tcase \".json\": return .Json\n\tcase \".ico\":  return .Ico\n\tcase \".gif\":  return .Gif\n\tcase \".jpeg\": return .Jpeg\n\tcase \".png\":  return .Png\n\tcase \".svg\":  return .Svg\n\tcase \".wasm\": return .Wasm\n\tcase:         return .Plain\n\t}\n\t//odinfmt:enable\n}\n\n@(private=\"file\")\n_mime_to_content_type := [Mime_Type]string{\n\t.Plain       = \"text/plain\",\n\n\t.Css         = \"text/css\",\n\t.Csv         = \"text/csv\",\n\t.Gif         = \"image/gif\",\n\t.Html        = \"text/html\",\n\t.Ico         = \"application/vnd.microsoft.ico\",\n\t.Jpeg        = \"image/jpeg\",\n\t.Js          = \"application/javascript\",\n\t.Json        = \"application/json\",\n\t.Png         = \"image/png\",\n\t.Svg         = \"image/svg+xml\",\n\t.Url_Encoded = \"application/x-www-form-urlencoded\",\n\t.Xml         = \"text/xml\",\n\t.Zip         = \"application/zip\",\n\t.Wasm        = \"application/wasm\",\n}\n\nmime_to_content_type :: proc(m: Mime_Type) -> string {\n\treturn _mime_to_content_type[m]\n}\n"
  },
  {
    "path": "mod.pkg",
    "content": "{\n\t\"version\": \"0.0.4-beta\",\n\t\"description\": \"An HTTP/1.1 client/server implementation\",\n\t\"url\": \"https://github.com/laytan/odin-http\",\n\t\"readme\": \"README.md\",\n\t\"license\": \"MIT\",\n\t\"keywords\": [\"HTTP\"]\n}\n"
  },
  {
    "path": "odinfmt.json",
    "content": "{\n\t\"character_width\": 120,\n\t\"tabs\": true,\n\t\"tabs_width\": 4\n}\n"
  },
  {
    "path": "old_nbio/README.md",
    "content": "# package nbio\n\nPackage nbio implements a non blocking IO abstraction layer over several platform specific APIs.\n\nThis package implements an event loop based abstraction.\n\n*TODO:*\n\n- Benchmarking\n- Some UDP implementations\n\n*APIs:*\n\n- Windows: [IOCP (IO Completion Ports)](https://en.wikipedia.org/wiki/Input/output_completion_port)\n- Linux:   [io_uring](https://en.wikipedia.org/wiki/Io_uring)\n- Darwin:  [KQueue](https://en.wikipedia.org/wiki/Kqueue)\n\n*How to read the code:*\n\nThe file nbio.odin can be read a little bit like a header file,\nit has all the procedures heavily explained and commented and dispatches them to platform specific code.\n\nYou can also have a look at the tests for more general usages, the example below or the generated docs even further below.\n\n```odin\n/*\nThis example shows a simple TCP server that echos back anything it receives.\n\nBetter error handling and closing/freeing connections are left for the reader.\n*/\npackage main\n\nimport \"core:fmt\"\nimport \"core:net\"\nimport \"core:os\"\n\nimport nbio \"nbio/poly\"\n\nEcho_Server :: struct {\n\tio:          nbio.IO,\n\tsock:        net.TCP_Socket,\n\tconnections: [dynamic]^Echo_Connection,\n}\n\nEcho_Connection :: struct {\n\tserver:  ^Echo_Server,\n\tsock:    net.TCP_Socket,\n\tbuf:     [50]byte,\n}\n\nmain :: proc() {\n\tserver: Echo_Server\n\tdefer delete(server.connections)\n\n\tnbio.init(&server.io)\n\tdefer nbio.destroy(&server.io)\n\n\tsock, err := nbio.open_and_listen_tcp(&server.io, {net.IP4_Loopback, 8080})\n\tfmt.assertf(err == nil, \"Error opening and listening on localhost:8080: %v\", err)\n\tserver.sock = sock\n\n\tnbio.accept(&server.io, sock, &server, echo_on_accept)\n\n\t// Start the event loop.\n\terrno: os.Errno\n\tfor errno == os.ERROR_NONE {\n\t\terrno = nbio.tick(&server.io)\n\t}\n\n\tfmt.assertf(errno == os.ERROR_NONE, \"Server stopped with error code: %v\", errno)\n}\n\necho_on_accept :: proc(server: ^Echo_Server, client: 
net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {\n\tfmt.assertf(err == nil, \"Error accepting a connection: %v\", err)\n\n\t// Register a new accept for the next client.\n\tnbio.accept(&server.io, server.sock, server, echo_on_accept)\n\n\tc := new(Echo_Connection)\n\tc.server = server\n\tc.sock   = client\n\tappend(&server.connections, c)\n\n\tnbio.recv(&server.io, client, c.buf[:], c, echo_on_recv)\n}\n\necho_on_recv :: proc(c: ^Echo_Connection, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {\n\tfmt.assertf(err == nil, \"Error receiving from client: %v\", err)\n\n\tnbio.send_all(&c.server.io, c.sock, c.buf[:received], c, echo_on_sent)\n}\n\necho_on_sent :: proc(c: ^Echo_Connection, sent: int, err: net.Network_Error) {\n\tfmt.assertf(err == nil, \"Error sending to client: %v\", err)\n\n\t// Accept the next message, to then ultimately echo back again.\n\tnbio.recv(&c.server.io, c.sock, c.buf[:], c, echo_on_recv)\n}\n```\n"
  },
  {
    "path": "old_nbio/_io_uring/os.odin",
    "content": "#+build linux\npackage io_uring\n\nimport \"core:math\"\nimport \"core:os\"\nimport \"core:sync\"\nimport \"core:sys/linux\"\nimport \"core:sys/unix\"\n\nDEFAULT_THREAD_IDLE_MS :: 1000\nDEFAULT_ENTRIES :: 32\nMAX_ENTRIES :: 4096\n\nIO_Uring_Error :: enum {\n\tNone,\n\tEntries_Zero,\n\tEntries_Not_Power_Of_Two,\n\tEntries_Too_Large,\n\tParams_Outside_Accessible_Address_Space,\n\tArguments_Invalid,\n\tProcess_Fd_Quota_Exceeded,\n\tSystem_Fd_Quota_Exceeded,\n\tSystem_Resources,\n\tPermission_Denied,\n\tSystem_Outdated,\n\tSubmission_Queue_Full,\n\tFile_Descriptor_Invalid,\n\tCompletion_Queue_Overcommitted,\n\tSubmission_Queue_Entry_Invalid,\n\tBuffer_Invalid,\n\tRing_Shutting_Down,\n\tOpcode_Not_Supported,\n\tSignal_Interrupt,\n\tUnexpected,\n}\n\nIO_Uring :: struct {\n\tfd:       os.Handle,\n\tsq:       Submission_Queue,\n\tcq:       Completion_Queue,\n\tflags:    u32,\n\tfeatures: u32,\n}\n\n// Set up an IO_Uring with default parameters, `entries` must be a power of 2 between 1 and 4096.\nio_uring_make :: proc(\n\tparams: ^io_uring_params,\n\tentries: u32 = DEFAULT_ENTRIES,\n\tflags: u32 = 0,\n) -> (\n\tring: IO_Uring,\n\terr: IO_Uring_Error,\n) {\n\tparams.flags = flags\n\tparams.sq_thread_idle = DEFAULT_THREAD_IDLE_MS\n\terr = io_uring_init(&ring, entries, params)\n\treturn\n}\n\n// Initialize and setup a io_uring with more control than io_uring_make.\nio_uring_init :: proc(ring: ^IO_Uring, entries: u32, params: ^io_uring_params) -> (err: IO_Uring_Error) {\n\tcheck_entries(entries) or_return\n\n\tres := sys_io_uring_setup(entries, params)\n\tif res < 0 {\n\t\t#partial switch os.Platform_Error(-res) {\n\t\tcase .EFAULT:\n\t\t\treturn .Params_Outside_Accessible_Address_Space\n\t\t// The resv array contains non-zero data, p.flags contains an unsupported flag,\n\t\t// entries out of bounds, IORING_SETUP_SQ_AFF was specified without IORING_SETUP_SQPOLL,\n\t\t// or IORING_SETUP_CQSIZE was specified but linux.io_uring_params.cq_entries was 
invalid:\n\t\tcase .EINVAL:\n\t\t\treturn .Arguments_Invalid\n\t\tcase .EMFILE:\n\t\t\treturn .Process_Fd_Quota_Exceeded\n\t\tcase .ENFILE:\n\t\t\treturn .System_Fd_Quota_Exceeded\n\t\tcase .ENOMEM:\n\t\t\treturn .System_Resources\n\t\t// IORING_SETUP_SQPOLL was specified but effective user ID lacks sufficient privileges,\n\t\t// or a container seccomp policy prohibits io_uring syscalls:\n\t\tcase .EPERM:\n\t\t\treturn .Permission_Denied\n\t\tcase .ENOSYS:\n\t\t\treturn .System_Outdated\n\t\tcase:\n\t\t\treturn .Unexpected\n\t\t}\n\t}\n\n\tfd := os.Handle(res)\n\n\t// Unsupported features.\n\tassert((params.features & IORING_FEAT_SINGLE_MMAP) != 0)\n\tassert((params.flags & IORING_SETUP_CQE32) == 0)\n\tassert((params.flags & IORING_SETUP_SQE128) == 0)\n\n\tsq, ok := submission_queue_make(fd, params)\n\tif !ok { return .System_Resources }\n\n\tring.fd = fd\n\tring.sq = sq\n\tring.cq = completion_queue_make(fd, params, &sq)\n\tring.flags = params.flags\n\tring.features = params.features\n\n\treturn\n}\n\n// Checks if the entries conform to the kernel rules.\n@(private)\ncheck_entries :: proc(entries: u32) -> (err: IO_Uring_Error) {\n\tswitch {\n\tcase entries >= MAX_ENTRIES:\n\t\terr = .Entries_Too_Large\n\tcase entries == 0:\n\t\terr = .Entries_Zero\n\tcase !math.is_power_of_two(int(entries)):\n\t\terr = .Entries_Not_Power_Of_Two\n\tcase:\n\t\terr = .None\n\t}\n\treturn\n}\n\nio_uring_destroy :: proc(ring: ^IO_Uring) {\n\tassert(ring.fd >= 0)\n\tsubmission_queue_destroy(&ring.sq)\n\tos.close(ring.fd)\n\tring.fd = -1\n}\n\n// Returns a pointer to a vacant submission queue entry, or an error if the submission queue is full.\nget_sqe :: proc(ring: ^IO_Uring) -> (sqe: ^io_uring_sqe, err: IO_Uring_Error) {\n\tsq := &ring.sq\n\thead: u32 = sync.atomic_load_explicit(sq.head, .Acquire)\n\tnext := sq.sqe_tail + 1\n\n\tif int(next - head) > len(sq.sqes) {\n\t\terr = .Submission_Queue_Full\n\t\treturn\n\t}\n\n\tsqe = &sq.sqes[sq.sqe_tail & sq.mask]\n\tsqe^ = 
{}\n\n\tsq.sqe_tail = next\n\treturn\n}\n\n// Submits the submission queue entries acquired via get_sqe().\n// Returns the number of entries submitted.\n// Optionally wait for a number of events by setting wait_nr.\nsubmit :: proc(ring: ^IO_Uring, wait_nr: u32 = 0) -> (n_submitted: u32, err: IO_Uring_Error) {\n\tn_submitted = flush_sq(ring)\n\tflags: u32 = 0\n\tif sq_ring_needs_enter(ring, &flags) || wait_nr > 0 {\n\t\tif wait_nr > 0 || ring.flags & IORING_SETUP_IOPOLL != 0 {\n\t\t\tflags |= IORING_ENTER_GETEVENTS\n\t\t}\n\t\tn_submitted, err = enter(ring, n_submitted, wait_nr, flags)\n\t}\n\treturn\n}\n\n// Tells the kernel that submission queue entries were submitted and/or we want to wait for their completion queue entries.\n// Returns the number of submission queue entries that were submitted.\nenter :: proc(\n\tring: ^IO_Uring,\n\tn_to_submit: u32,\n\tmin_complete: u32,\n\tflags: u32,\n) -> (\n\tn_submitted: u32,\n\terr: IO_Uring_Error,\n) {\n\tassert(ring.fd >= 0)\n\tns := sys_io_uring_enter(u32(ring.fd), n_to_submit, min_complete, flags, nil)\n\tif ns < 0 {\n\t\t#partial switch os.Platform_Error(-ns) {\n\t\tcase .NONE:\n\t\t\terr = .None\n\t\tcase .EAGAIN:\n\t\t\t// The kernel was unable to allocate memory or ran out of resources for the request. (try again)\n\t\t\terr = .System_Resources\n\t\tcase .EBADF:\n\t\t\t// The SQE `fd` is invalid, or `IOSQE_FIXED_FILE` was set but no files were registered\n\t\t\terr = .File_Descriptor_Invalid\n\t\t// case os.EBUSY: // TODO: why is this not in os_linux\n\t\t// \t// Attempted to overcommit the number of requests it can have pending. 
Should wait for some completions and try again.\n\t\t// \terr = .Completion_Queue_Overcommitted\n\t\tcase .EINVAL:\n\t\t\t// The SQE is invalid, or valid but the ring was setup with `IORING_SETUP_IOPOLL`\n\t\t\terr = .Submission_Queue_Entry_Invalid\n\t\tcase .EFAULT:\n\t\t\t// The buffer is outside the process' accessible address space, or `IORING_OP_READ_FIXED`\n\t\t\t// or `IORING_OP_WRITE_FIXED` was specified but no buffers were registered, or the range\n\t\t\t// described by `addr` and `len` is not within the buffer registered at `buf_index`\n\t\t\terr = .Buffer_Invalid\n\t\tcase .ENXIO:\n\t\t\terr = .Ring_Shutting_Down\n\t\tcase .EOPNOTSUPP:\n\t\t\t// The kernel believes the `fd` doesn't refer to an `io_uring`, or the opcode isn't supported by this kernel (more likely)\n\t\t\terr = .Opcode_Not_Supported\n\t\tcase .EINTR:\n\t\t\t// The op was interrupted by a delivery of a signal before it could complete.This can happen while waiting for events with `IORING_ENTER_GETEVENTS`\n\t\t\terr = .Signal_Interrupt\n\t\tcase:\n\t\t\terr = .Unexpected\n\t\t}\n\t\treturn\n\t}\n\n\tn_submitted = u32(ns)\n\treturn\n}\n\n// Sync internal state with kernel ring state on the submission queue side.\n// Returns the number of all pending events in the submission queue.\n// Rationale is to determine that an enter call is needed.\nflush_sq :: proc(ring: ^IO_Uring) -> (n_pending: u32) {\n\tsq := &ring.sq\n\tto_submit := sq.sqe_tail - sq.sqe_head\n\tif to_submit != 0 {\n\t\ttail := sq.tail^\n\t\ti: u32 = 0\n\t\tfor ; i < to_submit; i += 1 {\n\t\t\tsq.array[tail & sq.mask] = sq.sqe_head & sq.mask\n\t\t\ttail += 1\n\t\t\tsq.sqe_head += 1\n\t\t}\n\t\tsync.atomic_store_explicit(sq.tail, tail, .Release)\n\t}\n\tn_pending = sq_ready(ring)\n\treturn\n}\n\n// Returns true if we are not using an SQ thread (thus nobody submits but us),\n// or if IORING_SQ_NEED_WAKEUP is set and the SQ thread must be explicitly awakened.\n// For the latter case, we set the SQ thread wakeup flag.\n// Matches the 
implementation of sq_ring_needs_enter() in liburing.\nsq_ring_needs_enter :: proc(ring: ^IO_Uring, flags: ^u32) -> bool {\n\tassert(flags^ == 0)\n\tif ring.flags & IORING_SETUP_SQPOLL == 0 { return true }\n\tif sync.atomic_load_explicit(ring.sq.flags, .Relaxed) & IORING_SQ_NEED_WAKEUP != 0 {\n\t\tflags^ |= IORING_ENTER_SQ_WAKEUP\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Returns the number of submission queue entries in the submission queue.\nsq_ready :: proc(ring: ^IO_Uring) -> u32 {\n\t// Always use the shared ring state (i.e. head and not sqe_head) to avoid going out of sync,\n\t// see https://github.com/axboe/liburing/issues/92.\n\treturn ring.sq.sqe_tail - sync.atomic_load_explicit(ring.sq.head, .Acquire)\n}\n\n// Returns the number of completion queue entries in the completion queue (yet to consume).\ncq_ready :: proc(ring: ^IO_Uring) -> (n_ready: u32) {\n\treturn sync.atomic_load_explicit(ring.cq.tail, .Acquire) - ring.cq.head^\n}\n\n// Copies as many CQEs as are ready, and that can fit into the destination `cqes` slice.\n// If none are available, enters into the kernel to wait for at most `wait_nr` CQEs.\n// Returns the number of CQEs copied, advancing the CQ ring.\n// Provides all the wait/peek methods found in liburing, but with batching and a single method.\ncopy_cqes :: proc(ring: ^IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) -> (n_copied: u32, err: IO_Uring_Error) {\n\tn_copied = copy_cqes_ready(ring, cqes)\n\tif n_copied > 0 { return }\n\tif wait_nr > 0 || cq_ring_needs_flush(ring) {\n\t\t_ = enter(ring, 0, wait_nr, IORING_ENTER_GETEVENTS) or_return\n\t\tn_copied = copy_cqes_ready(ring, cqes)\n\t}\n\treturn\n}\n\ncopy_cqes_ready :: proc(ring: ^IO_Uring, cqes: []io_uring_cqe) -> (n_copied: u32) {\n\tn_ready := cq_ready(ring)\n\tn_copied = min(u32(len(cqes)), n_ready)\n\thead := ring.cq.head^\n\ttail := head + n_copied\n\n\ti := 0\n\tfor head != tail {\n\t\tcqes[i] = ring.cq.cqes[head & ring.cq.mask]\n\t\thead += 1\n\t\ti += 
1\n\t}\n\tcq_advance(ring, n_copied)\n\treturn\n}\n\ncq_ring_needs_flush :: proc(ring: ^IO_Uring) -> bool {\n\treturn sync.atomic_load_explicit(ring.sq.flags, .Relaxed) & IORING_SQ_CQ_OVERFLOW != 0\n}\n\n// For advanced use cases only that implement custom completion queue methods.\n// If you use copy_cqes() or copy_cqe() you must not call cqe_seen() or cq_advance().\n// Must be called exactly once after a zero-copy CQE has been processed by your application.\n// Not idempotent, calling more than once will result in other CQEs being lost.\n// Matches the implementation of cqe_seen() in liburing.\ncqe_seen :: proc(ring: ^IO_Uring) {\n\tcq_advance(ring, 1)\n}\n\n// For advanced use cases only that implement custom completion queue methods.\n// Matches the implementation of cq_advance() in liburing.\ncq_advance :: proc(ring: ^IO_Uring, count: u32) {\n\tif count == 0 { return }\n\tsync.atomic_store_explicit(ring.cq.head, ring.cq.head^ + count, .Release)\n}\n\n// Queues (but does not submit) an SQE to perform an `fsync(2)`.\n// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.\nfsync :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tfd: os.Handle,\n\tflags: u32,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = .FSYNC\n\tsqe.rw_flags = i32(flags)\n\tsqe.fd = i32(fd)\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to perform a no-op.\n// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.\n// A no-op is more useful than may appear at first glance.\n// For example, you could call `drain_previous_sqes()` on the returned SQE, to use the no-op to\n// know when the ring is idle before acting on a kill signal.\nnop :: proc(ring: ^IO_Uring, user_data: u64) -> (sqe: ^io_uring_sqe, err: IO_Uring_Error) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = .NOP\n\tsqe.user_data = user_data\n\treturn\n}\n\n// 
Queues (but does not submit) an SQE to perform a `read(2)`.\nread :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tfd: os.Handle,\n\tbuf: []u8,\n\toffset: u64,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = .READ\n\tsqe.fd = i32(fd)\n\tsqe.addr = cast(u64)uintptr(raw_data(buf))\n\tsqe.len = u32(len(buf))\n\tsqe.off = offset\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to perform a `write(2)`.\nwrite :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tfd: os.Handle,\n\tbuf: []u8,\n\toffset: u64,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = .WRITE\n\tsqe.fd = i32(fd)\n\tsqe.addr = cast(u64)uintptr(raw_data(buf))\n\tsqe.len = u32(len(buf))\n\tsqe.off = offset\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to perform an `accept4(2)` on a socket.\n// `addr`,`addr_len` optional\naccept :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tsockfd: os.Socket,\n\taddr: ^os.SOCKADDR = nil,\n\taddr_len: ^os.socklen_t = nil,\n\tflags: u32 = 0,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = IORING_OP.ACCEPT\n\tsqe.fd = i32(sockfd)\n\tsqe.addr = cast(u64)uintptr(addr)\n\tsqe.off = cast(u64)uintptr(addr_len)\n\tsqe.rw_flags = i32(flags)\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queue (but does not submit) an SQE to perform a `connect(2)` on a socket.\nconnect :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tsockfd: os.Socket,\n\taddr: ^os.SOCKADDR,\n\taddr_len: os.socklen_t,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = IORING_OP.CONNECT\n\tsqe.fd = i32(sockfd)\n\tsqe.addr = cast(u64)uintptr(addr)\n\tsqe.off = cast(u64)addr_len\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to perform a `recv(2)`.\nrecv :: 
proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tsockfd: os.Socket,\n\tbuf: []byte,\n\tflags: u32,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = IORING_OP.RECV\n\tsqe.fd = i32(sockfd)\n\tsqe.addr = cast(u64)uintptr(raw_data(buf))\n\tsqe.len = cast(u32)uintptr(len(buf))\n\tsqe.rw_flags = i32(flags)\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to perform a `send(2)`.\nsend :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tsockfd: os.Socket,\n\tbuf: []byte,\n\tflags: u32,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = IORING_OP.SEND\n\tsqe.fd = i32(sockfd)\n\tsqe.addr = cast(u64)uintptr(raw_data(buf))\n\tsqe.len = u32(len(buf))\n\tsqe.rw_flags = i32(flags)\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to perform an `openat(2)`.\nopenat :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tfd: os.Handle,\n\tpath: cstring,\n\tmode: u32,\n\tflags: u32,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = IORING_OP.OPENAT\n\tsqe.fd = i32(fd)\n\tsqe.addr = cast(u64)transmute(uintptr)path\n\tsqe.len = mode\n\tsqe.rw_flags = i32(flags)\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to perform a `close(2)`.\nclose :: proc(ring: ^IO_Uring, user_data: u64, fd: os.Handle) -> (sqe: ^io_uring_sqe, err: IO_Uring_Error) {\n\tsqe, err = get_sqe(ring)\n\tif err != .None {return}\n\tsqe.opcode = IORING_OP.CLOSE\n\tsqe.fd = i32(fd)\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to register a timeout operation.\n// Returns a pointer to the SQE.\n//\n// The timeout will complete when either the timeout expires, or after the specified number of\n// events complete (if `count` is greater than `0`).\n//\n// `flags` may be `0` for a relative timeout, or 
`IORING_TIMEOUT_ABS` for an absolute timeout.\n//\n// The completion event result will be `-ETIME` if the timeout completed through expiration,\n// `0` if the timeout completed after the specified number of events, or `-ECANCELED` if the\n// timeout was removed before it expired.\n//\n// io_uring timeouts use the `CLOCK.MONOTONIC` clock source.\ntimeout :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tts: ^linux.Time_Spec,\n\tcount: u32,\n\tflags: u32,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = IORING_OP.TIMEOUT\n\tsqe.fd = -1\n\tsqe.addr = cast(u64)uintptr(ts)\n\tsqe.len = 1\n\tsqe.off = u64(count)\n\tsqe.rw_flags = i32(flags)\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to remove an existing timeout operation.\n// Returns a pointer to the SQE.\n//\n// The timeout is identified by its `user_data`.\n//\n// The completion event result will be `0` if the timeout was found and cancelled successfully,\n// `-EBUSY` if the timeout was found but expiration was already in progress, or\n// `-ENOENT` if the timeout was not found.\ntimeout_remove :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\ttimeout_user_data: u64,\n\tflags: u32,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = IORING_OP.TIMEOUT_REMOVE\n\tsqe.fd = -1\n\tsqe.addr = timeout_user_data\n\tsqe.rw_flags = i32(flags)\n\tsqe.user_data = user_data\n\treturn\n}\n\n// Queues (but does not submit) an SQE to add a link timeout operation.\n// Returns a pointer to the SQE.\n//\n// You need to set linux.IOSQE_IO_LINK to flags of the target operation\n// and then call this method right after the target operation.\n// See https://lwn.net/Articles/803932/ for detail.\n//\n// If the dependent request finishes before the linked timeout, the timeout\n// is canceled. 
If the timeout finishes before the dependent request, the\n// dependent request will be canceled.\n//\n// The completion event result of the link_timeout will be\n// `-ETIME` if the timeout finishes before the dependent request\n// (in this case, the completion event result of the dependent request will\n// be `-ECANCELED`), or\n// `-EALREADY` if the dependent request finishes before the linked timeout.\nlink_timeout :: proc(\n\tring: ^IO_Uring,\n\tuser_data: u64,\n\tts: ^os.Unix_File_Time,\n\tflags: u32,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe = get_sqe(ring) or_return\n\tsqe.opcode = IORING_OP.LINK_TIMEOUT\n\tsqe.fd = -1\n\tsqe.addr = cast(u64)uintptr(ts)\n\tsqe.len = 1\n\tsqe.rw_flags = i32(flags)\n\tsqe.user_data = user_data\n\treturn\n}\n\npoll_add :: proc(\n\tring:      ^IO_Uring,\n\tuser_data: u64,\n\tfd:        os.Handle,\n\tevents:    linux.Fd_Poll_Events,\n\tflags:     IORing_Poll_Flags,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe             = get_sqe(ring) or_return\n\tsqe.opcode      = IORING_OP.POLL_ADD\n\tsqe.fd          = i32(fd)\n\tsqe.poll_events = transmute(u16)events\n\tsqe.len         = transmute(u32)flags\n\tsqe.user_data   = user_data\n\treturn\n}\n\npoll_remove :: proc(\n\tring:      ^IO_Uring,\n\tuser_data: u64,\n\tfd:        os.Handle,\n\tevents:    linux.Fd_Poll_Events,\n) -> (\n\tsqe: ^io_uring_sqe,\n\terr: IO_Uring_Error,\n) {\n\tsqe             = get_sqe(ring) or_return\n\tsqe.opcode      = IORING_OP.POLL_REMOVE\n\tsqe.fd          = i32(fd)\n\tsqe.poll_events = transmute(u16)events\n\tsqe.user_data   = user_data\n\treturn\n}\n\nSubmission_Queue :: struct {\n\thead:      ^u32,\n\ttail:      ^u32,\n\tmask:      u32,\n\tflags:     ^u32,\n\tdropped:   ^u32,\n\tarray:     []u32,\n\tsqes:      []io_uring_sqe,\n\tmmap:      []u8,\n\tmmap_sqes: []u8,\n\n\t// We use `sqe_head` and `sqe_tail` in the same way as liburing:\n\t// We increment `sqe_tail` (but not `tail`) for each call to 
`get_sqe()`.\n\t// We then set `tail` to `sqe_tail` once, only when these events are actually submitted.\n\t// This allows us to amortize the cost of the @atomicStore to `tail` across multiple SQEs.\n\tsqe_head:  u32,\n\tsqe_tail:  u32,\n}\n\nsubmission_queue_make :: proc(fd: os.Handle, params: ^io_uring_params) -> (sq: Submission_Queue, ok: bool) {\n\tassert(fd >= 0)\n\t// Unsupported feature.\n\tassert((params.features & IORING_FEAT_SINGLE_MMAP) != 0)\n\n\tsq_size := params.sq_off.array + params.sq_entries * size_of(u32)\n\tcq_size := params.cq_off.cqes + params.cq_entries * size_of(io_uring_cqe)\n\tsize := max(sq_size, cq_size)\n\n\tmmap_result := unix.sys_mmap(\n\t\tnil,\n\t\tuint(size),\n\t\tunix.PROT_READ | unix.PROT_WRITE,\n\t\tunix.MAP_SHARED,\n\t\t/* | unix.MAP_POPULATE */\n\t\tint(fd),\n\t\tIORING_OFF_SQ_RING,\n\t)\n\tif mmap_result < 0 { return }\n\tdefer if !ok { unix.sys_munmap(rawptr(uintptr(mmap_result)), uint(size)) }\n\n\tmmap := transmute([^]u8)uintptr(mmap_result)\n\n\tsize_sqes := params.sq_entries * size_of(io_uring_sqe)\n\tmmap_sqes_result := unix.sys_mmap(\n\t\tnil,\n\t\tuint(size_sqes),\n\t\tunix.PROT_READ | unix.PROT_WRITE,\n\t\tunix.MAP_SHARED,\n\t\t/* | unix.MAP_POPULATE */\n\t\tint(fd),\n\t\tIORING_OFF_SQES,\n\t)\n\tif mmap_sqes_result < 0 { return }\n\n\tarray := cast([^]u32)&mmap[params.sq_off.array]\n\tsqes := cast([^]io_uring_sqe)uintptr(mmap_sqes_result)\n\tmmap_sqes := cast([^]u8)uintptr(mmap_sqes_result)\n\n\n\tsq.head = cast(^u32)&mmap[params.sq_off.head]\n\tsq.tail = cast(^u32)&mmap[params.sq_off.tail]\n\tsq.mask = (cast(^u32)&mmap[params.sq_off.ring_mask])^\n\tsq.flags = cast(^u32)&mmap[params.sq_off.flags]\n\tsq.dropped = cast(^u32)&mmap[params.sq_off.dropped]\n\tsq.array = array[:params.sq_entries]\n\tsq.sqes = sqes[:params.sq_entries]\n\tsq.mmap = mmap[:size]\n\tsq.mmap_sqes = mmap_sqes[:size_sqes]\n\n\tok = true\n\treturn\n}\n\nsubmission_queue_destroy :: proc(sq: ^Submission_Queue) {\n\tunix.sys_munmap(raw_data(sq.mmap), 
uint(len(sq.mmap)))\n\tunix.sys_munmap(raw_data(sq.mmap_sqes), uint(len(sq.mmap)))\n}\n\nCompletion_Queue :: struct {\n\thead:     ^u32,\n\ttail:     ^u32,\n\tmask:     u32,\n\toverflow: ^u32,\n\tcqes:     []io_uring_cqe,\n}\n\ncompletion_queue_make :: proc(fd: os.Handle, params: ^io_uring_params, sq: ^Submission_Queue) -> Completion_Queue {\n\tassert(fd >= 0)\n\t// Unsupported feature.\n\tassert((params.features & IORING_FEAT_SINGLE_MMAP) != 0)\n\n\tmmap := sq.mmap\n\tcqes := cast([^]io_uring_cqe)&mmap[params.cq_off.cqes]\n\n\treturn(\n\t\t{\n\t\t\thead = cast(^u32)&mmap[params.cq_off.head],\n\t\t\ttail = cast(^u32)&mmap[params.cq_off.tail],\n\t\t\tmask = (cast(^u32)&mmap[params.cq_off.ring_mask])^,\n\t\t\toverflow = cast(^u32)&mmap[params.cq_off.overflow],\n\t\t\tcqes = cqes[:params.cq_entries],\n\t\t} \\\n\t)\n}\n"
  },
  {
    "path": "old_nbio/_io_uring/sys.odin",
    "content": "#+build linux\npackage io_uring\n\nimport \"base:intrinsics\"\n\n//odinfmt:disable\nSYS_io_uring_setup:    uintptr : 425\nSYS_io_uring_enter:    uintptr : 426\nSYS_io_uring_register: uintptr : 427\n//odinfmt:enable\n\nNSIG :: 65\n\nsigset_t :: [1024 / 32]u32\n\nio_uring_params :: struct {\n\tsq_entries:     u32,\n\tcq_entries:     u32,\n\tflags:          u32,\n\tsq_thread_cpu:  u32,\n\tsq_thread_idle: u32,\n\tfeatures:       u32,\n\twq_fd:          u32,\n\tresv:           [3]u32,\n\tsq_off:         io_sqring_offsets,\n\tcq_off:         io_cqring_offsets,\n}\n#assert(size_of(io_uring_params) == 120)\n\nio_sqring_offsets :: struct {\n\thead:         u32,\n\ttail:         u32,\n\tring_mask:    u32,\n\tring_entries: u32,\n\tflags:        u32,\n\tdropped:      u32,\n\tarray:        u32,\n\tresv1:        u32,\n\tuser_addr:    u64,\n}\n\nio_cqring_offsets :: struct {\n\thead:         u32,\n\ttail:         u32,\n\tring_mask:    u32,\n\tring_entries: u32,\n\toverflow:     u32,\n\tcqes:         u32,\n\tflags:        u32,\n\tresv1:        u32,\n\tuser_addr:    u64,\n}\n\n// Submission queue entry.\nio_uring_sqe :: struct {\n\topcode:           IORING_OP, // u8\n\tflags:            u8, /* IOSQE_ flags */\n\tioprio:           u16, /* ioprio for the request */\n\tfd:               i32, /* file descriptor to do IO on */\n\tusing __offset:   struct #raw_union {\n\t\toff:     u64, /* offset into file */\n\t\taddr2:   u64,\n\t\tusing _: struct {\n\t\t\tcmd_op: u32,\n\t\t\t__pad1: u32,\n\t\t},\n\t},\n\tusing __iovecs:   struct #raw_union {\n\t\taddr:          u64, /* pointer to buffer or iovecs */\n\t\tsplice_off_in: u64,\n\t},\n\tlen:              u32, /* buffer size or number of iovecs */\n\tusing __contents: struct #raw_union {\n\t\trw_flags:         i32,\n\t\tfsync_flags:      u32,\n\t\tpoll_events:      u16, /* compatibility */\n\t\tpoll32_events:    u32, /* word-reversed for BE */\n\t\tsync_range_flags: u32,\n\t\tmsg_flags:        u32,\n\t\ttimeout_flags:    
u32,\n\t\taccept_flags:     u32,\n\t\tcancel_flags:     u32,\n\t\topen_flags:       u32,\n\t\tstatx_flags:      u32,\n\t\tfadvise_advice:   u32,\n\t\tsplice_flags:     u32,\n\t\trename_flags:     u32,\n\t\tunlink_flags:     u32,\n\t\thardlink_flags:   u32,\n\t\txattr_flags:      u32,\n\t\tmsg_ring_flags:   u32,\n\t\turing_cmd_flags:  u32,\n\t},\n\tuser_data:        u64, /* data to be passed back at completion time */\n\t/* pack this to avoid bogus arm OABI complaints */\n\tusing __buffer:   struct #raw_union {\n\t\t/* index into fixed buffers, if used */\n\t\tbuf_index: u16,\n\t\t/* for grouped buffer selection */\n\t\tbuf_group: u16,\n\t},\n\t/* personality to use, if used */\n\tpersonality:      u16,\n\tusing _:          struct #raw_union {\n\t\tsplice_fd_in: i32,\n\t\tfile_index:   u32,\n\t\tusing _:      struct {\n\t\t\taddr_len: u16,\n\t\t\t__pad3:   [1]u16,\n\t\t},\n\t},\n\tusing __:         struct #raw_union {\n\t\tusing _: struct {\n\t\t\taddr3:  u64,\n\t\t\t__pad2: [1]u64,\n\t\t},\n\t\t/*\n\t\t * If the ring is initialized with IORING_SETUP_SQE128, then\n\t\t * this field is used for 80 bytes of arbitrary command data\n\t\t * NOTE: This is currently not supported.\n\t\t */\n\t\t// cmd:     [^]u8,\n\t},\n}\n#assert(size_of(io_uring_sqe) == 64)\n\n// Completion queue entry.\nio_uring_cqe :: struct {\n\tuser_data: u64, /* sq.data submission passed back */\n\tres:       i32, /* result code for this event */\n\tflags:     u32,\n\t/*\n\t * If the ring is initialized with IORING_SETUP_CQE32, then this field\n\t * contains 16-bytes of padding, doubling the size of the CQE.\n\t * NOTE: This is currently not supported.\n\t */\n\t// big_cqe:   [^]u64,\n}\n#assert(size_of(io_uring_cqe) == 16)\n\n/*\n * sqe.flags\n */\n/* use fixed fileset */\nIOSQE_FIXED_FILE: u32 : (1 << 0)\n/* issue after inflight IO */\nIOSQE_IO_DRAIN: u32 : (1 << 1)\n/* links next sqe */\nIOSQE_IO_LINK: u32 : (1 << 2)\n/* like LINK, but stronger */\nIOSQE_IO_HARDLINK: u32 : (1 << 3)\n/* always go 
async */\nIOSQE_ASYNC: u32 : (1 << 4)\n/* select buffer from sq.buf_group */\nIOSQE_BUFFER_SELECT: u32 : (1 << 5)\n/* don't post CQE if request succeeded */\nIOSQE_CQE_SKIP_SUCCESS: u32 : (1 << 6)\n\n/*\n * io_uring_setup() flags\n */\nIORING_SETUP_IOPOLL: u32 : (1 << 0) /* io_context is polled */\nIORING_SETUP_SQPOLL: u32 : (1 << 1) /* SQ poll thread */\nIORING_SETUP_SQ_AFF: u32 : (1 << 2) /* sq_thread_cpu is valid */\nIORING_SETUP_CQSIZE: u32 : (1 << 3) /* app defines CQ size */\nIORING_SETUP_CLAMP: u32 : (1 << 4) /* clamp SQ/CQ ring sizes */\nIORING_SETUP_ATTACH_WQ: u32 : (1 << 5) /* attach to existing wq */\nIORING_SETUP_R_DISABLED: u32 : (1 << 6) /* start with ring disabled */\nIORING_SETUP_SUBMIT_ALL: u32 : (1 << 7) /* continue submit on error */\n// Cooperative task running. When requests complete, they often require\n// forcing the submitter to transition to the kernel to complete. If this\n// flag is set, work will be done when the task transitions anyway, rather\n// than force an inter-processor interrupt reschedule. This avoids interrupting\n// a task running in userspace, and saves an IPI.\nIORING_SETUP_COOP_TASKRUN: u32 : (1 << 8)\n// If COOP_TASKRUN is set, get notified if task work is available for\n// running and a kernel transition would be needed to run it. This sets\n// IORING_SQ_TASKRUN in the sq ring flags. 
Not valid with COOP_TASKRUN.\nIORING_SETUP_TASKRUN_FLAG: u32 : (1 << 9)\nIORING_SETUP_SQE128: u32 : (1 << 10) /* SQEs are 128 byte */\nIORING_SETUP_CQE32: u32 : (1 << 11) /* CQEs are 32 byte */\n// Only one task is allowed to submit requests\nIORING_SETUP_SINGLE_ISSUER: u32 : (1 << 12)\n// Defer running task work to get events.\n// Rather than running bits of task work whenever the task transitions\n// try to do it just before it is needed.\nIORING_SETUP_DEFER_TASKRUN: u32 : (1 << 13)\n\n/*\n * sqe.uring_cmd_flags\n * IORING_URING_CMD_FIXED\tuse registered buffer; pass this flag\n *\t\t\t\talong with setting sqe.buf_index.\n */\nIORING_URING_CMD_FIXED: u32 : (1 << 0)\n\n/*\n * sqe.fsync_flags\n */\nIORING_FSYNC_DATASYNC: u32 : (1 << 0)\n\n/*\n * sqe.timeout_flags\n */\nIORING_TIMEOUT_ABS: u32 : (1 << 0)\nIORING_TIMEOUT_UPDATE: u32 : (1 << 1)\nIORING_TIMEOUT_BOOTTIME: u32 : (1 << 2)\nIORING_TIMEOUT_REALTIME: u32 : (1 << 3)\nIORING_LINK_TIMEOUT_UPDATE: u32 : (1 << 4)\nIORING_TIMEOUT_ETIME_SUCCESS: u32 : (1 << 5)\nIORING_TIMEOUT_CLOCK_MASK: u32 : (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)\nIORING_TIMEOUT_UPDATE_MASK: u32 : (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)\n\n/*\n * sq_ring.flags\n */\nIORING_SQ_NEED_WAKEUP: u32 : (1 << 0) /* needs io_uring_enter wakeup */\nIORING_SQ_CQ_OVERFLOW: u32 : (1 << 1) /* CQ ring is overflown */\nIORING_SQ_TASKRUN: u32 : (1 << 2) /* task should enter the kernel */\n\n/*\n * sqe.splice_flags\n * extends splice(2) flags\n */\nSPLICE_F_FD_IN_FIXED: u32 : (1 << 31) /* the last bit of __u32 */\n\n/*\n * POLL_ADD flags. Note that since sqe.poll_events is the flag space, the command flags for POLL_ADD are stored in sqe.len.\n *\n * IORING_POLL_ADD_MULTI\tMultishot poll. 
Sets IORING_CQE_F_MORE if the poll handler will continue to report CQEs on behalf of the same SQE.\n\n * IORING_POLL_UPDATE\t\tUpdate existing poll request, matching sqe.addr as the old user_data field.\n *\n * IORING_POLL_LEVEL\t\tLevel triggered poll.\n */\nIORING_POLL_ADD_MULTI: u32 : (1 << 0)\nIORING_POLL_UPDATE_EVENTS: u32 : (1 << 1)\nIORING_POLL_UPDATE_USER_DATA: u32 : (1 << 2)\nIORING_POLL_ADD_LEVEL: u32 : (1 << 3)\n\nIORing_Poll_Bits :: enum {\n\tADD_MULTI,\n\tUPDATE_EVENTS,\n\tUPDATE_USER_DATA,\n\tADD_LEVEL,\n}\n\nIORing_Poll_Flags :: bit_set[IORing_Poll_Bits; u32]\n\n/*\n  * send/sendmsg and recv/recvmsg flags (sq.ioprio)\n  *\n  * IORING_RECVSEND_POLL_FIRST\tIf set, instead of first attempting to send\n  *\t\t\t\tor receive and arm poll if that yields an\n  *\t\t\t\t-EAGAIN result, arm poll upfront and skip\n  *\t\t\t\tthe initial transfer attempt.\n  *\n  * IORING_RECV_MULTISHOT\tMultishot recv. Sets IORING_CQE_F_MORE if\n  *\t\t\t\tthe handler will continue to report\n  *\t\t\t\tCQEs on behalf of the same SQE.\n  *\n  * IORING_RECVSEND_FIXED_BUF\tUse registered buffers, the index is stored in\n  *\t\t\t\tthe buf_index field.\n  *\n  * IORING_SEND_ZC_REPORT_USAGE\n  *\t\t\t\tIf set, SEND[MSG]_ZC should report\n  *\t\t\t\tthe zerocopy usage in cqe.res\n  *\t\t\t\tfor the IORING_CQE_F_NOTIF cqe.\n  *\t\t\t\t0 is reported if zerocopy was actually possible.\n  *\t\t\t\tIORING_NOTIF_USAGE_ZC_COPIED if data was copied\n  *\t\t\t\t(at least partially).\n  */\nIORING_RECVSEND_POLL_FIRST: u32 : (1 << 0)\nIORING_RECV_MULTISHOT: u32 : (1 << 1)\nIORING_RECVSEND_FIXED_BUF: u32 : (1 << 2)\nIORING_SEND_ZC_REPORT_USAGE: u32 : (1 << 3)\n\n/*\n  * cqe.res for IORING_CQE_F_NOTIF if\n  * IORING_SEND_ZC_REPORT_USAGE was requested\n  *\n  * It should be treated as a flag, all other\n  * bits of cqe.res should be treated as reserved!\n  */\nIORING_NOTIF_USAGE_ZC_COPIED: u32 : (1 << 31)\n\n/*\n  * accept flags stored in sq.ioprio\n  */\nIORING_ACCEPT_MULTISHOT: u32 : (1 << 
0)\n\n/*\n  * IORING_OP_MSG_RING command types, stored in sq.addr\n  */\nIORING_MSG :: enum {\n\tDATA, /* pass sq.len as 'res' and off as user_data */\n\tSEND_FD, /* send a registered fd to another ring */\n}\n\n/*\n  * IORING_OP_MSG_RING flags (sq.msg_ring_flags)\n  *\n  * IORING_MSG_RING_CQE_SKIP\tDon't post a CQE to the target ring. Not\n  *\t\t\t\tapplicable for IORING_MSG_DATA, obviously.\n  */\nIORING_MSG_RING_CQE_SKIP: u32 : (1 << 0)\n/* Pass through the flags from sq.file_index to cqe.flags */\nIORING_MSG_RING_FLAGS_PASS: u32 : (1 << 1)\n\nIORING_OP :: enum u8 {\n\tNOP,\n\tREADV,\n\tWRITEV,\n\tFSYNC,\n\tREAD_FIXED,\n\tWRITE_FIXED,\n\tPOLL_ADD,\n\tPOLL_REMOVE,\n\tSYNC_FILE_RANGE,\n\tSENDMSG,\n\tRECVMSG,\n\tTIMEOUT,\n\tTIMEOUT_REMOVE,\n\tACCEPT,\n\tASYNC_CANCEL,\n\tLINK_TIMEOUT,\n\tCONNECT,\n\tFALLOCATE,\n\tOPENAT,\n\tCLOSE,\n\tFILES_UPDATE,\n\tSTATX,\n\tREAD,\n\tWRITE,\n\tFADVISE,\n\tMADVISE,\n\tSEND,\n\tRECV,\n\tOPENAT2,\n\tEPOLL_CTL,\n\tSPLICE,\n\tPROVIDE_BUFFERS,\n\tREMOVE_BUFFERS,\n\tTEE,\n\tSHUTDOWN,\n\tRENAMEAT,\n\tUNLINKAT,\n\tMKDIRAT,\n\tSYMLINKAT,\n\tLINKAT,\n\t/* this goes last, obviously */\n\tLAST,\n}\n\n/*\n * sys_io_uring_register() opcodes and arguments.\n */\nIORING_REGISTER :: enum u32 {\n\tREGISTER_BUFFERS = 0,\n\tUNREGISTER_BUFFERS = 1,\n\tREGISTER_FILES = 2,\n\tUNREGISTER_FILES = 3,\n\tREGISTER_EVENTFD = 4,\n\tUNREGISTER_EVENTFD = 5,\n\tREGISTER_FILES_UPDATE = 6,\n\tREGISTER_EVENTFD_ASYNC = 7,\n\tREGISTER_PROBE = 8,\n\tREGISTER_PERSONALITY = 9,\n\tUNREGISTER_PERSONALITY = 10,\n\tREGISTER_RESTRICTIONS = 11,\n\tREGISTER_ENABLE_RINGS = 12,\n\t/* extended with tagging */\n\tREGISTER_FILES2 = 13,\n\tREGISTER_FILES_UPDATE2 = 14,\n\tREGISTER_BUFFERS2 = 15,\n\tREGISTER_BUFFERS_UPDATE = 16,\n\t/* set/clear io-wq thread affinities */\n\tREGISTER_IOWQ_AFF = 17,\n\tUNREGISTER_IOWQ_AFF = 18,\n\t/* set/get max number of io-wq workers */\n\tREGISTER_IOWQ_MAX_WORKERS = 19,\n\t/* register/unregister io_uring fd with the ring */\n\tREGISTER_RING_FDS = 
20,\n\tUNREGISTER_RING_FDS = 21,\n\t/* register ring based provide buffer group */\n\tREGISTER_PBUF_RING = 22,\n\tUNREGISTER_PBUF_RING = 23,\n\t/* sync cancelation API */\n\tREGISTER_SYNC_CANCEL = 24,\n\t/* register a range of fixed file slots for automatic slot allocation */\n\tREGISTER_FILE_ALLOC_RANGE = 25,\n\t/* this goes last */\n\tREGISTER_LAST,\n\t/* flag added to the opcode to use a registered ring fd */\n\tREGISTER_USE_REGISTERED_RING = 1 << 31,\n}\n\nIORING_FEAT_SINGLE_MMAP: u32 : (1 << 0)\nIORING_FEAT_NODROP: u32 : (1 << 1)\nIORING_FEAT_SUBMIT_STABLE: u32 : (1 << 2)\nIORING_FEAT_RW_CUR_POS: u32 : (1 << 3)\nIORING_FEAT_CUR_PERSONALITY: u32 : (1 << 4)\nIORING_FEAT_FAST_POLL: u32 : (1 << 5)\nIORING_FEAT_POLL_32BITS: u32 : (1 << 6)\nIORING_FEAT_SQPOLL_NONFIXED: u32 : (1 << 7)\nIORING_FEAT_EXT_ARG: u32 : (1 << 8)\nIORING_FEAT_NATIVE_WORKERS: u32 : (1 << 9)\nIORING_FEAT_RSRC_TAGS: u32 : (1 << 10)\n\n/*\n * cqe.flags\n *\n * IORING_CQE_F_BUFFER\tIf set, the upper 16 bits are the buffer ID\n * IORING_CQE_F_MORE\tIf set, parent SQE will generate more CQE entries\n * IORING_CQE_F_SOCK_NONEMPTY\tIf set, more data to read after socket recv\n * IORING_CQE_F_NOTIF\tSet for notification CQEs. 
Can be used to distinct\n * \t\t\tthem from sends.\n */\nIORING_CQE_F_BUFFER: u32 : (1 << 0)\nIORING_CQE_F_MORE: u32 : (1 << 1)\nIORING_CQE_F_SOCK_NONEMPTY: u32 : (1 << 2)\nIORING_CQE_F_NOTIF: u32 : (1 << 3)\n\nIORING_CQE :: enum {\n\tBUFFER_SHIFT = 16,\n}\n\n/*\n * cq_ring->flags\n */\n// disable eventfd notifications\nIORING_CQ_EVENTFD_DISABLED: u32 : (1 << 0)\n\n/*\n * io_uring_enter(2) flags\n */\nIORING_ENTER_GETEVENTS: u32 : (1 << 0)\nIORING_ENTER_SQ_WAKEUP: u32 : (1 << 1)\nIORING_ENTER_SQ_WAIT: u32 : (1 << 2)\nIORING_ENTER_EXT_ARG: u32 : (1 << 3)\nIORING_ENTER_REGISTERED_RING: u32 : (1 << 4)\n\n/*\n * Magic offsets for the application to mmap the data it needs\n */\nIORING_OFF_SQ_RING: uintptr : 0\nIORING_OFF_CQ_RING: u64 : 0x8000000\nIORING_OFF_SQES: uintptr : 0x10000000\nIORING_OFF_PBUF_RING: u64 : 0x80000000\nIORING_OFF_PBUF_SHIFT :: 16\nIORING_OFF_MMAP_MASK: u64 : 0xf8000000\n\nsys_io_uring_setup :: proc \"contextless\" (entries: u32, params: ^io_uring_params) -> int {\n\treturn int(intrinsics.syscall(SYS_io_uring_setup, uintptr(entries), uintptr(params)))\n}\n\nsys_io_uring_enter :: proc \"contextless\" (\n\tfd: u32,\n\tto_submit: u32,\n\tmin_complete: u32,\n\tflags: u32,\n\tsig: ^sigset_t,\n) -> int {\n\treturn int(\n\t\tintrinsics.syscall(\n\t\t\tSYS_io_uring_enter,\n\t\t\tuintptr(fd),\n\t\t\tuintptr(to_submit),\n\t\t\tuintptr(min_complete),\n\t\t\tuintptr(flags),\n\t\t\tuintptr(sig),\n\t\t\tNSIG / 8 if sig != nil else 0,\n\t\t),\n\t)\n}\n\nsys_io_uring_register :: proc \"contextless\" (fd: u32, opcode: IORING_REGISTER, arg: rawptr, nr_args: u32) -> int {\n\treturn int(intrinsics.syscall(SYS_io_uring_register, uintptr(fd), uintptr(opcode), uintptr(arg), uintptr(nr_args)))\n}\n"
  },
  {
    "path": "old_nbio/doc.odin",
    "content": "/*\npackage nbio implements a non blocking IO abstraction layer over several platform specific APIs.\n\nThis package implements an event loop based abstraction.\n\nAPIs:\n- Windows: [[IOCP IO Completion Ports;https://en.wikipedia.org/wiki/Input/output_completion_port]]\n- Linux:   [[io_uring;https://en.wikipedia.org/wiki/Io_uring]]\n- Darwin:  [[KQueue;https://en.wikipedia.org/wiki/Kqueue]]\n\nHow to read the code:\n\nThe file nbio.odin can be read a little bit like a header file,\nit has all the procedures heavily explained and commented and dispatches them to platform specific code.\n\nYou can also have a look at the tests for more general usages.\n\nExample:\n\t/*\n\tThis example shows a simple TCP server that echos back anything it receives.\n\n\tBetter error handling and closing/freeing connections are left for the reader.\n\t*/\n\tpackage main\n\n\timport \"core:fmt\"\n\timport \"core:net\"\n\timport \"core:os\"\n\n\timport nbio \"nbio/poly\"\n\n\tEcho_Server :: struct {\n\t\tio:          nbio.IO,\n\t\tsock:        net.TCP_Socket,\n\t\tconnections: [dynamic]^Echo_Connection,\n\t}\n\n\tEcho_Connection :: struct {\n\t\tserver:  ^Echo_Server,\n\t\tsock:    net.TCP_Socket,\n\t\tbuf:     [50]byte,\n\t}\n\n\tmain :: proc() {\n\t\tserver: Echo_Server\n\t\tdefer delete(server.connections)\n\n\t\tnbio.init(&server.io)\n\t\tdefer nbio.destroy(&server.io)\n\n\t\tsock, err := nbio.open_and_listen_tcp(&server.io, {net.IP4_Loopback, 8080})\n\t\tfmt.assertf(err == nil, \"Error opening and listening on localhost:8080: %v\", err)\n\t\tserver.sock = sock\n\n\t\tnbio.accept(&server.io, sock, &server, echo_on_accept)\n\n\t\t// Start the event loop.\n\t\terrno: os.Errno\n\t\tfor errno == os.ERROR_NONE {\n\t\t\terrno = nbio.tick(&server.io)\n\t\t}\n\n\t\tfmt.assertf(errno == os.ERROR_NONE, \"Server stopped with error code: %v\", errno)\n\t}\n\n\techo_on_accept :: proc(server: ^Echo_Server, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) 
{\n\t\tfmt.assertf(err == nil, \"Error accepting a connection: %v\", err)\n\n\t\t// Register a new accept for the next client.\n\t\tnbio.accept(&server.io, server.sock, server, echo_on_accept)\n\n\t\tc := new(Echo_Connection)\n\t\tc.server = server\n\t\tc.sock   = client\n\t\tappend(&server.connections, c)\n\n\t\tnbio.recv(&server.io, client, c.buf[:], c, echo_on_recv)\n\t}\n\n\techo_on_recv :: proc(c: ^Echo_Connection, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {\n\t\tfmt.assertf(err == nil, \"Error receiving from client: %v\", err)\n\n\t\tnbio.send_all(&c.server.io, c.sock, c.buf[:received], c, echo_on_sent)\n\t}\n\n\techo_on_sent :: proc(c: ^Echo_Connection, sent: int, err: net.Network_Error) {\n\t\tfmt.assertf(err == nil, \"Error sending to client: %v\", err)\n\n\t\t// Accept the next message, to then ultimately echo back again.\n\t\tnbio.recv(&c.server.io, c.sock, c.buf[:], c, echo_on_recv)\n\t}\n*/\npackage nbio\n"
  },
  {
    "path": "old_nbio/nbio.odin",
    "content": "package nbio\n\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:time\"\n\n/*\nThe main IO type that holds the platform dependant implementation state passed around most procedures in this package\n*/\nIO :: _IO\n\n/*\nInitializes the IO type, allocates different things per platform needs\n\n*Allocates Using Provided Allocator*\n\nInputs:\n- io:        The IO struct to initialize\n- allocator: (default: context.allocator)\n\nReturns:\n- err: An error code when something went wrong with the setup of the platform's IO API, 0 otherwise\n*/\ninit :: proc(io: ^IO, allocator := context.allocator) -> (err: os.Errno) {\n\treturn _init(io, allocator)\n}\n\n/*\nThe place where the magic happens, each time you call this the IO implementation checks its state\nand calls any callbacks which are ready. You would typically call this in a loop\n\nInputs:\n- io: The IO instance to tick\n\nReturns:\n- err: An error code when something went when retrieving events, 0 otherwise\n*/\ntick :: proc(io: ^IO) -> os.Errno {\n\treturn _tick(io)\n}\n\n/*\nReturns the number of in-progress IO to be completed.\n*/\nnum_waiting :: #force_inline proc(io: ^IO) -> int {\n\treturn _num_waiting(io)\n}\n\n/*\nDeallocates anything that was allocated when calling init()\n\nInputs:\n- io: The IO instance to deallocate\n\n*Deallocates with the allocator that was passed with the init() call*\n*/\ndestroy :: proc(io: ^IO) {\n\t_destroy(io)\n}\n\n/*\nThe callback for a \"next tick\" event\n\nInputs:\n- user: A passed through pointer from initiation to its callback\n*/\nOn_Next_Tick :: #type proc(user: rawptr)\n\n/*\nSchedules a callback to be called during the next tick of the event loop.\n\nInputs:\n- io:   The IO instance to use\n- user: A pointer that will be passed through to the callback, free to use by you and untouched by us\n*/\nnext_tick :: proc(io: ^IO, user: rawptr, callback: On_Next_Tick) -> ^Completion {\n\treturn _next_tick(io, user, callback)\n}\n\n/*\nThe callback for non 
blocking `timeout` calls\n\nInputs:\n- user: A passed through pointer from initiation to its callback\n*/\nOn_Timeout :: #type proc(user: rawptr)\n\n/*\nSchedules a callback to be called after the given duration elapses.\n\nThe accuracy depends on the time between calls to `tick`.\nWhen you call it in a loop with no blocks or very expensive calculations in other scheduled event callbacks\nit is reliable to about a ms of difference (so timeout of 10ms would almost always be ran between 10ms and 11ms).\n\nInputs:\n- io:       The IO instance to use\n- dur:      The minimum duration to wait before calling the given callback\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Timeout` for its arguments\n*/\ntimeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) {\n\t_timeout(io, dur, user, callback)\n}\n\n/*\nCreates a socket, sets non blocking mode and relates it to the given IO\n\nInputs:\n- io:       The IO instance to initialize the socket on/with\n- family:   Should this be an IP4 or IP6 socket\n- protocol: The type of socket (TCP or UDP)\n\nReturns:\n- socket: The opened socket\n- err:    A network error that happened while opening\n*/\nopen_socket :: proc(\n\tio: ^IO,\n\tfamily: net.Address_Family,\n\tprotocol: net.Socket_Protocol,\n) -> (\n\tsocket: net.Any_Socket,\n\terr: net.Network_Error,\n) {\n\treturn _open_socket(io, family, protocol)\n}\n\n/*\nCreates a socket, sets non blocking mode, relates it to the given IO, binds the socket to the given endpoint and starts listening\n\nInputs:\n- io:       The IO instance to initialize the socket on/with\n- endpoint: Where to bind the socket to\n\nReturns:\n- socket: The opened, bound and listening socket\n- err:    A network error that happened while opening\n*/\nopen_and_listen_tcp :: proc(io: ^IO, ep: net.Endpoint) -> (socket: net.TCP_Socket, err: 
net.Network_Error) {\n\tfamily := net.family_from_endpoint(ep)\n\tsock := open_socket(io, family, .TCP) or_return\n\tsocket = sock.(net.TCP_Socket)\n\n\tif err = net.bind(socket, ep); err != nil {\n\t\tnet.close(socket)\n\t\treturn\n\t}\n\n\tif err = listen(socket); err != nil {\n\t\tnet.close(socket)\n\t}\n\treturn\n}\n\n/*\nStarts listening on the given socket\n\nInputs:\n- socket:  The socket to start listening\n- backlog: The amount of events to keep in the backlog when they are not consumed\n\nReturns:\n- err: A network error that happened when starting listening\n*/\nlisten :: proc(socket: net.TCP_Socket, backlog := 1000) -> (err: net.Network_Error) {\n\treturn _listen(socket, backlog)\n}\n\n/*\nThe callback for non blocking `close` requests\n\nInputs:\n- user: A passed through pointer from initiation to its callback\n- ok:   Whether the operation suceeded sucessfully\n*/\nOn_Close :: #type proc(user: rawptr, ok: bool)\n\n@private\nempty_on_close :: proc(_: rawptr, _: bool) {}\n\n/*\nA union of types that are `close`'able by this package\n*/\nClosable :: union #no_nil {\n\tnet.TCP_Socket,\n\tnet.UDP_Socket,\n\tnet.Socket,\n\tos.Handle,\n}\n\n/*\nCloses the given `Closable` socket or file handle that was originally created by this package.\n\n*Due to platform limitations, you must pass a `Closable` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The `Closable` socket or handle (created using/by this package) to close\n- user:     An optional pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: An optional callback that is called when the operation completes, see docs for `On_Close` for its arguments\n*/\nclose :: proc(io: ^IO, fd: Closable, user: rawptr = nil, callback: On_Close = empty_on_close) {\n\t_close(io, fd, user, callback)\n}\n\n/*\nThe callback for non blocking `accept` requests\n\nInputs:\n- user:   A passed through pointer from initiation 
to its callback\n- client: The socket to communicate through with the newly accepted client\n- source: The origin of the client\n- err:    A network error that occured during the accept process\n*/\nOn_Accept :: #type proc(user: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error)\n\n/*\nUsing the given socket, accepts the next incoming connection, calling the callback when that happens\n\n*Due to platform limitations, you must pass a socket that was opened using the `open_socket` and related procedures from this package*\n\nInputs:\n- io:       The IO instance to use\n- socket:   A bound and listening socket *that was created using this package*\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Accept` for its arguments\n*/\naccept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) {\n\t_accept(io, socket, user, callback)\n}\n\n/*\nThe callback for non blocking `connect` requests\n\nInputs:\n- user:   A passed through pointer from initiation to its callback\n- socket: A socket that is connected to the given endpoint in the `connect` call\n- err:    A network error that occured during the connect call\n*/\nOn_Connect :: #type proc(user: rawptr, socket: net.TCP_Socket, err: net.Network_Error)\n\n/*\nConnects to the given endpoint, calling the given callback once it has been done\n\nInputs:\n- io:       The IO instance to use\n- endpoint: An endpoint to connect a socket to\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Connect` for its arguments\n*/\nconnect :: proc(io: ^IO, endpoint: net.Endpoint, user: rawptr, callback: On_Connect) {\n\t_, err := _connect(io, endpoint, user, callback)\n\tif err != nil {\n\t\tcallback(user, {}, 
err)\n\t}\n}\n\n/*\nThe callback for non blocking `recv` requests\n\nInputs:\n- user:       A passed through pointer from initiation to its callback\n- received:   The amount of bytes that were read and added to the given buf\n- udp_client: If the given socket was a `net.UDP_Socket`, this will be the client that was received from\n- err:        A network error if it occured\n*/\nOn_Recv :: #type proc(user: rawptr, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error)\n\n/*\nReceives from the given socket, at most `len(buf)` bytes, and calls the given callback\n\n*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- socket:   Either a `net.TCP_Socket` or a `net.UDP_Socket` (that was opened/returned by this package) to receive from\n- buf:      The buffer to put received bytes into\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Recv` for its arguments\n*/\nrecv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv) {\n\t_recv(io, socket, buf, user, callback)\n}\n\n/*\nReceives from the given socket until the given buf is full or an error occurred, and calls the given callback\n\n*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- socket:   Either a `net.TCP_Socket` or a `net.UDP_Socket` (that was opened/returned by this package) to receive from\n- buf:      The buffer to put received bytes into\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Recv` for its 
arguments\n*/\nrecv_all :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv) {\n\t_recv(io, socket, buf, user, callback, all = true)\n}\n\n/*\nThe callback for non blocking `send` and `send_all` requests\n\nInputs:\n- user: A passed through pointer from initiation to its callback\n- sent: The amount of bytes that were sent over the connection\n- err:  A network error if it occured\n*/\nOn_Sent :: #type proc(user: rawptr, sent: int, err: net.Network_Error)\n\n/*\nSends at most `len(buf)` bytes from the given buffer over the socket connection, and calls the given callback\n\n*Prefer using the `send` proc group*\n\n*Due to platform limitations, you must pass a `net.TCP_Socket` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- socket:   a `net.TCP_Socket` (that was opened/returned by this package) to send to\n- buf:      The buffer send\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments\n*/\nsend_tcp :: proc(io: ^IO, socket: net.TCP_Socket, buf: []byte, user: rawptr, callback: On_Sent) {\n\t_send(io, socket, buf, user, callback)\n}\n\n/*\nSends at most `len(buf)` bytes from the given buffer over the socket connection to the given endpoint, and calls the given callback\n\n*Prefer using the `send` proc group*\n\n*Due to platform limitations, you must pass a `net.UDP_Socket` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- endpoint: The endpoint to send bytes to over the socket\n- socket:   a `net.UDP_Socket` (that was opened/returned by this package) to send to\n- buf:      The buffer send\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for 
`On_Sent` for its arguments\n*/\nsend_udp :: proc(\n\tio: ^IO,\n\tendpoint: net.Endpoint,\n\tsocket: net.UDP_Socket,\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Sent,\n) {\n\t_send(io, socket, buf, user, callback, endpoint)\n}\n\n/*\nSends at most `len(buf)` bytes from the given buffer over the socket connection, and calls the given callback\n\n*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*\n*/\nsend :: proc {\n\tsend_udp,\n\tsend_tcp,\n}\n\n/*\nSends the bytes from the given buffer over the socket connection, and calls the given callback\n\nThis will keep sending until either an error or the full buffer is sent\n\n*Prefer using the `send` proc group*\n\n*Due to platform limitations, you must pass a `net.TCP_Socket` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- socket:   a `net.TCP_Socket` (that was opened/returned by this package) to send to\n- buf:      The buffer send\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments\n*/\nsend_all_tcp :: proc(io: ^IO, socket: net.TCP_Socket, buf: []byte, user: rawptr, callback: On_Sent) {\n\t_send(io, socket, buf, user, callback, all = true)\n}\n\n/*\nSends the bytes from the given buffer over the socket connection to the given endpoint, and calls the given callback\n\nThis will keep sending until either an error or the full buffer is sent\n\n*Prefer using the `send` proc group*\n\n*Due to platform limitations, you must pass a `net.UDP_Socket` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- endpoint: The endpoint to send bytes to over the socket\n- socket:   a `net.UDP_Socket` (that was opened/returned by this package) to send to\n- buf:      The buffer send\n- user:     A 
pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments\n*/\nsend_all_udp :: proc(\n\tio: ^IO,\n\tendpoint: net.Endpoint,\n\tsocket: net.UDP_Socket,\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Sent,\n) {\n\t_send(io, socket, buf, user, callback, endpoint, all = true)\n}\n\n/*\nSends the bytes from the given buffer over the socket connection, and calls the given callback\n\nThis will keep sending until either an error or the full buffer is sent\n\n*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*\n*/\nsend_all :: proc {\n\tsend_all_udp,\n\tsend_all_tcp,\n}\n\n/*\nOpens a file hande, sets non blocking mode and relates it to the given IO\n\n*The perm argument is only used when on the darwin or linux platforms, when on Windows you can't use the os.S_\\* constants because they aren't declared*\n*To prevent compilation errors on Windows, you should use a `when` statement around using those constants and just pass 0*\n\nInputs:\n- io:   The IO instance to connect the opened file to\n- mode: The file mode                                 (default: os.O_RDONLY)\n- perm: The permissions to use when creating a file   (default: 0)\n\nReturns:\n- handle: The file handle\n- err:    The error code when an error occured, 0 otherwise\n*/\nopen :: proc(io: ^IO, path: string, mode: int = os.O_RDONLY, perm: int = 0) -> (handle: os.Handle, err: os.Errno) {\n\treturn _open(io, path, mode, perm)\n}\n\n/*\nWhere to seek from\n\nOptions:\n- Set:  sets the offset to the given value\n- Curr: adds the given offset to the current offset\n- End:  adds the given offset to the end of the file\n*/\nWhence :: enum {\n\tSet,\n\tCurr,\n\tEnd,\n}\n\n/*\nSeeks the given handle according to the given offset and whence, so that subsequent read and writes *USING THIS PACKAGE* 
will do so at that offset\n\n*Some platforms require this package to handle offsets while others have state in the kernel, for this reason you should assume that seeking only affects this package*\n\nInputs:\n- io:     The IO instance to seek on\n- fd:     The file handle to seek\n- whence: The seek mode/where to seek from (default: Whence.Set)\n\nReturns:\n- new_offset: The offset that the file is at when the operation completed\n- err:        The error when an error occured, 0 otherwise\n*/\nseek :: proc(io: ^IO, fd: os.Handle, offset: int, whence: Whence = .Set) -> (new_offset: int, err: os.Errno) {\n\treturn _seek(io, fd, offset, whence)\n}\n\n/*\nThe callback for non blocking `read` or `read_at` requests\n\nInputs:\n- user: A passed through pointer from initiation to its callback\n- read: The amount of bytes that were read and added to the given buf\n- err:  An error number if an error occured, 0 otherwise\n*/\nOn_Read :: #type proc(user: rawptr, read: int, err: os.Errno)\n\n/*\nReads from the given handle, at the handle's internal offset, at most `len(buf)` bytes, increases the file offset, and calls the given callback\n\n*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file handle (created using/by this package) to read from\n- buf:      The buffer to put read bytes into\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments\n*/\nread :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Read) {\n\t_read(io, fd, nil, buf, user, callback)\n}\n\n/*\nReads from the given handle, at the handle's internal offset, until the given buf is full or an error occurred, increases the file offset, and calls the given callback\n\n*Due to platform limitations, you must pass a 
`os.Handle` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file handle (created using/by this package) to read from\n- buf:      The buffer to put read bytes into\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments\n*/\nread_all :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Read) {\n\t_read(io, fd, nil, buf, user, callback, all = true)\n}\n\n/*\nReads from the given handle, at the given offset, at most `len(buf)` bytes, and calls the given callback\n\n*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file handle (created using/by this package) to read from\n- offset:   The offset to begin the read from\n- buf:      The buffer to put read bytes into\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments\n*/\nread_at :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Read) {\n\t_read(io, fd, offset, buf, user, callback)\n}\n\n/*\nReads from the given handle, at the given offset, until the given buf is full or an error occurred, and calls the given callback\n\n*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file handle (created using/by this package) to read from\n- offset:   The offset to begin the read from\n- buf:      The buffer to put read bytes into\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The 
callback that is called when the operation completes, see docs for `On_Read` for its arguments\n*/\nread_at_all :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Read) {\n\t_read(io, fd, offset, buf, user, callback, all = true)\n}\n\nread_entire_file :: read_full\n\n/*\nReads the entire file (size found by seeking to the end) into a singly allocated buffer that is returned.\nThe callback is called once the file is read into the returned buf.\n\n*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file handle (created using/by this package) to read from\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments\n\nReturns:\n- buf:      The buffer allocated to the size retrieved by seeking to the end of the file that is filled before calling the callback\n*/\nread_full :: proc(io: ^IO, fd: os.Handle, user: rawptr, callback: On_Read, allocator := context.allocator) -> []byte {\n\tsize, err := seek(io, fd, 0, .End)\n\tif err != os.ERROR_NONE {\n\t\tcallback(user, 0, err)\n\t\treturn nil\n\t}\n\n\tif size <= 0 {\n\t\tcallback(user, 0, os.ERROR_NONE)\n\t\treturn nil\n\t}\n\n\tbuf := make([]byte, size, allocator)\n\tread_at_all(io, fd, 0, buf, user, callback)\n\treturn buf\n}\n\n/*\nThe callback for non blocking `write`, `write_all`, `write_at` and `write_at_all` requests\n\nInputs:\n- user:     A passed through pointer from initiation to its callback\n- written:  The amount of bytes that were written to the file\n- err:      An error number if an error occured, 0 otherwise\n*/\nOn_Write :: #type proc(user: rawptr, written: int, err: os.Errno)\n\n/*\nWrites to the given handle, at the handle's internal offset, at most `len(buf)` bytes, increases the file offset, and 
calls the given callback\n\n*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file handle (created using/by this package) to write to\n- buf:      The buffer to write to the file\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments\n*/\nwrite :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Write) {\n\t_write(io, fd, nil, buf, user, callback)\n}\n\n/*\nWrites the given buffer to the given handle, at the handle's internal offset, increases the file offset, and calls the given callback\n\nThis keeps writing until either an error or the full buffer being written\n\n*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file handle (created using/by this package) to write to\n- buf:      The buffer to write to the file\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments\n*/\nwrite_all :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Write) {\n\t_write(io, fd, nil, buf, user, callback, true)\n}\n\n/*\nWrites to the given handle, at the given offset, at most `len(buf)` bytes, and calls the given callback\n\n*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file handle (created using/by this package) to write to from\n- offset:   The offset to begin the write from\n- buf:      The buffer to write to the file\n- user:     A pointer that will be passed 
through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments\n*/\nwrite_at :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Write) {\n\t_write(io, fd, offset, buf, user, callback)\n}\n\n/*\nWrites the given buffer to the given handle, at the given offset, and calls the given callback\n\nThis keeps writing until either an error or the full buffer being written\n\n*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file handle (created using/by this package) to write to from\n- offset:   The offset to begin the write from\n- buf:      The buffer to write to the file\n- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments\n*/\nwrite_at_all :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Write) {\n\t_write(io, fd, offset, buf, user, callback, true)\n}\n\nPoll_Event :: enum {\n\t// The subject is ready to be read from.\n\tRead,\n\t// The subject is ready to be written to.\n\tWrite,\n}\n\n/*\nThe callback for poll requests\n\nInputs:\n- user:  A passed through pointer from initiation to its callback\n- event: The event that is ready to go\n*/\nOn_Poll :: #type proc(user: rawptr, event: Poll_Event)\n\n/*\nPolls for the given event on the subject handle\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file descriptor to poll\n- event:    Whether to call the callback when `fd` is ready to be read from, or be written to\n- multi:    Keeps the poll after an event happens, calling the callback again for further events, remove poll with `poll_remove`\n- user:     An optional pointer that will be 
passed through to the callback, free to use by you and untouched by us\n- callback: The callback that is called when the operation completes, see docs for `On_Poll` for its arguments\n*/\npoll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) {\n\t_poll(io, fd, event, multi, user, callback)\n}\n\n/*\nRemoves the polling for this `subject`+`event` pairing\n\nThis is only needed when `poll` was called with `multi` set to `true`\n\nInputs:\n- io:       The IO instance to use\n- fd:       The file descriptor to remove the poll of\n- event:    The event to remove the poll of\n*/\npoll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) {\n\t_poll_remove(io, fd, event)\n}\n\nMAX_USER_ARGUMENTS :: size_of(rawptr) * 5\n\nCompletion :: struct {\n\t// Implementation specifics, don't use outside of implementation/os.\n\tusing _:   _Completion,\n\n\tuser_data: rawptr,\n\n\t// Callback pointer and user args passed in poly variants.\n\tuser_args: [MAX_USER_ARGUMENTS + size_of(rawptr)]byte,\n}\n\n@(private)\nOperation :: union #no_nil {\n\tOp_Accept,\n\tOp_Close,\n\tOp_Connect,\n\tOp_Read,\n\tOp_Recv,\n\tOp_Send,\n\tOp_Write,\n\tOp_Timeout,\n\tOp_Next_Tick,\n\tOp_Poll,\n\tOp_Poll_Remove,\n}\n"
  },
  {
    "path": "old_nbio/nbio_darwin.odin",
    "content": "package nbio\n\nimport \"core:container/queue\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:time\"\nimport \"core:sys/kqueue\"\nimport \"core:sys/posix\"\n\n_init :: proc(io: ^IO, allocator := context.allocator) -> (err: os.Errno) {\n\tqerr: posix.Errno\n\tio.kq, qerr = kqueue.kqueue()\n\tif qerr != nil { return kq_err_to_os_err(qerr) }\n\n\tpool_init(&io.completion_pool, allocator = allocator)\n\n\tio.timeouts = make([dynamic]^Completion, allocator)\n\tio.io_pending = make([dynamic]^Completion, allocator)\n\n\tqueue.init(&io.completed, allocator = allocator)\n\n\tio.allocator = allocator\n\treturn\n}\n\n_num_waiting :: #force_inline proc(io: ^IO) -> int {\n\treturn io.completion_pool.num_waiting\n}\n\n_destroy :: proc(io: ^IO) {\n\tcontext.allocator = io.allocator\n\n\tdelete(io.timeouts)\n\tdelete(io.io_pending)\n\n\tqueue.destroy(&io.completed)\n\n\tposix.close(io.kq)\n\n\tpool_destroy(&io.completion_pool)\n}\n\n_tick :: proc(io: ^IO) -> os.Errno {\n\treturn flush(io)\n}\n\n_listen :: proc(socket: net.TCP_Socket, backlog := 1000) -> net.Network_Error {\n\terrno := os.listen(os.Socket(socket), backlog)\n\tif errno != nil {\n\t\treturn net._listen_error()\n\t}\n\treturn nil\n}\n\n_accept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx       = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Accept{\n\t\tcallback = callback,\n\t\tsock     = socket,\n\t}\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion\n}\n\n// Wraps os.close using the kqueue.\n_close :: proc(io: ^IO, fd: Closable, user: rawptr, callback: On_Close) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx           = context\n\tcompletion.user_data     = user\n\n\tcompletion.operation = Op_Close{\n\t\tcallback = callback,\n\t}\n\top := &completion.operation.(Op_Close)\n\n\tswitch h in fd {\n\tcase 
net.TCP_Socket: op.handle = os.Handle(h)\n\tcase net.UDP_Socket: op.handle = os.Handle(h)\n\tcase net.Socket:     op.handle = os.Handle(h)\n\tcase os.Handle:      op.handle = h\n\t}\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion\n}\n\n// TODO: maybe call this dial?\n_connect :: proc(io: ^IO, endpoint: net.Endpoint, user: rawptr, callback: On_Connect) -> (^Completion, net.Network_Error) {\n\tif endpoint.port == 0 {\n\t\treturn nil, net.Dial_Error.Port_Required\n\t}\n\n\tfamily := net.family_from_endpoint(endpoint)\n\tsock, err := net.create_socket(family, .TCP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif prep_err := _prepare_socket(sock); prep_err != nil {\n\t\tclose(io, net.any_socket_to_socket(sock))\n\t\treturn nil, prep_err\n\t}\n\n\tcompletion := pool_get(&io.completion_pool)\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Connect {\n\t\tcallback = callback,\n\t\tsocket   = sock.(net.TCP_Socket),\n\t\tsockaddr = _endpoint_to_sockaddr(endpoint),\n\t}\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion, nil\n}\n\n_read :: proc(\n\tio: ^IO,\n\tfd: os.Handle,\n\toffset: Maybe(int),\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Read,\n\tall := false,\n) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Read {\n\t\tcallback = callback,\n\t\tfd       = fd,\n\t\tbuf      = buf,\n\t\toffset   = offset.? 
or_else -1,\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t}\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion\n}\n\n_recv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv, all := false) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Recv {\n\t\tcallback = callback,\n\t\tsocket   = socket,\n\t\tbuf      = buf,\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t}\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion\n}\n\n_send :: proc(\n\tio: ^IO,\n\tsocket: net.Any_Socket,\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Sent,\n\tendpoint: Maybe(net.Endpoint) = nil,\n\tall := false,\n) -> ^Completion {\n\tif _, ok := socket.(net.UDP_Socket); ok {\n\t\tassert(endpoint != nil)\n\t}\n\n\tcompletion := pool_get(&io.completion_pool)\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Send {\n\t\tcallback = callback,\n\t\tsocket   = socket,\n\t\tbuf      = buf,\n\t\tendpoint = endpoint,\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t}\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion\n}\n\n_write :: proc(\n\tio: ^IO,\n\tfd: os.Handle,\n\toffset: Maybe(int),\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Write,\n\tall := false,\n) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Write {\n\t\tcallback = callback,\n\t\tfd       = fd,\n\t\tbuf      = buf,\n\t\toffset   = offset.? 
or_else -1,\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t}\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion\n}\n\n// Runs the callback after the timeout, using the kqueue.\n_timeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Timeout {\n\t\tcallback = callback,\n\t\texpires  = time.time_add(time.now(), dur),\n\t}\n\n\tappend(&io.timeouts, completion)\n\treturn completion\n}\n\n_next_tick :: proc(io: ^IO, user: rawptr, callback: On_Next_Tick) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Next_Tick {\n\t\tcallback = callback,\n\t}\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion\n}\n\n_poll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Poll{\n\t\tcallback = callback,\n\t\tfd       = fd,\n\t\tevent    = event,\n\t\tmulti    = multi,\n\t}\n\n\tappend(&io.io_pending, completion)\n\treturn completion\n}\n\n_poll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.operation = Op_Poll_Remove{\n\t\tfd    = fd,\n\t\tevent = event,\n\t}\n\n\tappend(&io.io_pending, completion)\n\treturn completion\n}\n"
  },
  {
    "path": "old_nbio/nbio_internal_darwin.odin",
    "content": "#+private\npackage nbio\n\nimport \"base:runtime\"\n\nimport \"core:container/queue\"\nimport \"core:mem\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:time\"\nimport \"core:sys/posix\"\nimport \"core:sys/kqueue\"\n\nMAX_EVENTS :: 256\n\n_IO :: struct {\n\tkq:              posix.FD,\n\tio_inflight:     int,\n\tcompletion_pool: Pool(Completion),\n\ttimeouts:        [dynamic]^Completion,\n\tcompleted:       queue.Queue(^Completion),\n\tio_pending:      [dynamic]^Completion,\n\tallocator:       mem.Allocator,\n}\n\n_Completion :: struct {\n\toperation: Operation,\n\tctx:       runtime.Context,\n}\n\nOp_Accept :: struct {\n\tcallback: On_Accept,\n\tsock:     net.TCP_Socket,\n}\n\nOp_Close :: struct {\n\tcallback: On_Close,\n\thandle:   os.Handle,\n}\n\nOp_Connect :: struct {\n\tcallback:  On_Connect,\n\tsocket:    net.TCP_Socket,\n\tsockaddr:  os.SOCKADDR_STORAGE_LH,\n\tinitiated: bool,\n}\n\nOp_Recv :: struct {\n\tcallback: On_Recv,\n\tsocket:   net.Any_Socket,\n\tbuf:      []byte,\n\tall:      bool,\n\treceived: int,\n\tlen:      int,\n}\n\nOp_Send :: struct {\n\tcallback: On_Sent,\n\tsocket:   net.Any_Socket,\n\tbuf:      []byte,\n\tendpoint: Maybe(net.Endpoint),\n\tall:      bool,\n\tlen:      int,\n\tsent:     int,\n}\n\nOp_Read :: struct {\n\tcallback: On_Read,\n\tfd:       os.Handle,\n\tbuf:      []byte,\n\toffset:\t  int,\n\tall:   \t  bool,\n\tread:  \t  int,\n\tlen:   \t  int,\n}\n\nOp_Write :: struct {\n\tcallback: On_Write,\n\tfd:       os.Handle,\n\tbuf:      []byte,\n\toffset:   int,\n\tall:      bool,\n\twritten:  int,\n\tlen:      int,\n}\n\nOp_Timeout :: struct {\n\tcallback: On_Timeout,\n\texpires:  time.Time,\n}\n\nOp_Next_Tick :: struct {\n\tcallback: On_Next_Tick,\n}\n\nOp_Poll :: struct {\n\tcallback: On_Poll,\n\tfd:       os.Handle,\n\tevent:    Poll_Event,\n\tmulti:    bool,\n}\n\nOp_Poll_Remove :: struct {\n\tfd:    os.Handle,\n\tevent: Poll_Event,\n}\n\nflush :: proc(io: ^IO) -> os.Errno {\n\tevents: 
[MAX_EVENTS]kqueue.KEvent\n\n\tmin_timeout := flush_timeouts(io)\n\tchange_events := flush_io(io, events[:])\n\n\tif (change_events > 0 || queue.len(io.completed) == 0) {\n\t\tif (change_events == 0 && queue.len(io.completed) == 0 && io.io_inflight == 0) {\n\t\t\treturn os.ERROR_NONE\n\t\t}\n\n\t\tmax_timeout := time.Millisecond * 10\n\t\tts: posix.timespec\n\t\tts.tv_nsec = min(min_timeout.? or_else i64(max_timeout), i64(max_timeout))\n\t\tnew_events, err := kqueue.kevent(io.kq, events[:change_events], events[:], &ts)\n\t\tif err != nil { return ev_err_to_os_err(err) }\n\n\t\t// PERF: this is ordered and O(N), can this be made unordered?\n\t\tremove_range(&io.io_pending, 0, change_events)\n\n\t\tio.io_inflight += change_events\n\t\tio.io_inflight -= int(new_events)\n\n\t\tif new_events > 0 {\n\t\t\tqueue.reserve(&io.completed, int(new_events))\n\t\t\tfor event in events[:new_events] {\n\t\t\t\tcompletion := cast(^Completion)event.udata\n\t\t\t\tqueue.push_back(&io.completed, completion)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Save length so we avoid an infinite loop when there is added to the queue in a callback.\n\tn := queue.len(io.completed)\n\tfor _ in 0 ..< n {\n\t\tcompleted := queue.pop_front(&io.completed)\n\t\tcontext = completed.ctx\n\n\t\tswitch &op in completed.operation {\n\t\tcase Op_Accept:      do_accept     (io, completed, &op)\n\t\tcase Op_Close:       do_close      (io, completed, &op)\n\t\tcase Op_Connect:     do_connect    (io, completed, &op)\n\t\tcase Op_Read:        do_read       (io, completed, &op)\n\t\tcase Op_Recv:        do_recv       (io, completed, &op)\n\t\tcase Op_Send:        do_send       (io, completed, &op)\n\t\tcase Op_Write:       do_write      (io, completed, &op)\n\t\tcase Op_Timeout:     do_timeout    (io, completed, &op)\n\t\tcase Op_Next_Tick:   do_next_tick  (io, completed, &op)\n\t\tcase Op_Poll:        do_poll       (io, completed, &op)\n\t\tcase Op_Poll_Remove: do_poll_remove(io, completed, &op)\n\t\tcase: 
unreachable()\n\t\t}\n\t}\n\n\treturn os.ERROR_NONE\n}\n\nflush_io :: proc(io: ^IO, events: []kqueue.KEvent) -> int {\n\tevents := events\n\tevents_loop: for &event, i in events {\n\t\tif len(io.io_pending) <= i { return i }\n\t\tcompletion := io.io_pending[i]\n\n\t\tswitch op in completion.operation {\n\t\tcase Op_Accept:\n\t\t\tevent.ident = uintptr(op.sock)\n\t\t\tevent.filter = .Read\n\t\tcase Op_Connect:\n\t\t\tevent.ident = uintptr(op.socket)\n\t\t\tevent.filter = .Write\n\t\tcase Op_Read:\n\t\t\tevent.ident = uintptr(op.fd)\n\t\t\tevent.filter = .Read\n\t\tcase Op_Write:\n\t\t\tevent.ident = uintptr(op.fd)\n\t\t\tevent.filter = .Read\n\t\tcase Op_Recv:\n\t\t\tevent.ident = uintptr(os.Socket(net.any_socket_to_socket(op.socket)))\n\t\t\tevent.filter = .Read\n\t\tcase Op_Send:\n\t\t\tevent.ident = uintptr(os.Socket(net.any_socket_to_socket(op.socket)))\n\t\t\tevent.filter = .Write\n\t\tcase Op_Poll:\n\t\t\tevent.ident = uintptr(op.fd)\n\t\t\tswitch op.event {\n\t\t\tcase .Read:  event.filter = .Read\n\t\t\tcase .Write: event.filter = .Write\n\t\t\tcase:        unreachable()\n\t\t\t}\n\n\t\t\tevent.flags = {.Add, .Enable}\n\t\t\tif !op.multi {\n\t\t\t\tevent.flags += {.One_Shot}\n\t\t\t}\n\n\t\t\tevent.udata = completion\n\n\t\t\tcontinue events_loop\n\t\tcase Op_Poll_Remove:\n\t\t\tevent.ident = uintptr(op.fd)\n\t\t\tswitch op.event {\n\t\t\tcase .Read:  event.filter = .Read\n\t\t\tcase .Write: event.filter = .Write\n\t\t\tcase:        unreachable()\n\t\t\t}\n\n\t\t\tevent.flags = {.Delete, .Disable, .One_Shot}\n\n\t\t\tevent.udata = completion\n\n\t\t\tcontinue events_loop\n\t\tcase Op_Timeout, Op_Close, Op_Next_Tick:\n\t\t\tpanic(\"invalid completion operation queued\")\n\t\t}\n\n\t\tevent.flags = {.Add, .Enable, .One_Shot}\n\t\tevent.udata = completion\n\t}\n\n\treturn len(events)\n}\n\nflush_timeouts :: proc(io: ^IO) -> (min_timeout: Maybe(i64)) {\n\tnow: time.Time\n\t// PERF: is there a faster way to compare time? 
Or time since program start and compare that?\n\tif len(io.timeouts) > 0 { now = time.now() }\n\n\tfor i := len(io.timeouts) - 1; i >= 0; i -= 1 {\n\t\tcompletion := io.timeouts[i]\n\n\t\ttimeout, ok := &completion.operation.(Op_Timeout)\n\t\tif !ok { panic(\"non-timeout operation found in the timeouts queue\") }\n\n\t\tunow := time.to_unix_nanoseconds(now)\n\t\texpires := time.to_unix_nanoseconds(timeout.expires)\n\t\tif unow >= expires {\n\t\t\tordered_remove(&io.timeouts, i)\n\t\t\tqueue.push_back(&io.completed, completion)\n\t\t\tcontinue\n\t\t}\n\n\t\ttimeout_ns := expires - unow\n\t\tif min, has_min_timeout := min_timeout.(i64); has_min_timeout {\n\t\t\tif timeout_ns < min {\n\t\t\t\tmin_timeout = timeout_ns\n\t\t\t}\n\t\t} else {\n\t\t\tmin_timeout = timeout_ns\n\t\t}\n\t}\n\n\treturn\n}\n\ndo_accept :: proc(io: ^IO, completion: ^Completion, op: ^Op_Accept) {\n\terr: net.Network_Error\n\tclient, source, accept_err := net.accept_tcp(op.sock)\n\tif accept_err == .Would_Block {\n\t\tappend(&io.io_pending, completion)\n\t\treturn\n\t} else if accept_err != nil {\n\t\terr = accept_err\n\t}\n\n\tif err == nil {\n\t\terr = _prepare_socket(client)\n\t}\n\n\tif err != nil {\n\t\tnet.close(client)\n\t\top.callback(completion.user_data, {}, {}, err)\n\t} else {\n\t\top.callback(completion.user_data, client, source, nil)\n\t}\n\n\tpool_put(&io.completion_pool, completion)\n}\n\ndo_close :: proc(io: ^IO, completion: ^Completion, op: ^Op_Close) {\n\tok := os.close(op.handle)\n\n\top.callback(completion.user_data, ok == os.ERROR_NONE)\n\n\tpool_put(&io.completion_pool, completion)\n}\n\ndo_connect :: proc(io: ^IO, completion: ^Completion, op: ^Op_Connect) {\n\tdefer op.initiated = true\n\n\terr: os.Errno\n\tif op.initiated {\n\t\t// We have already called os.connect, retrieve error number only.\n\t\tos.getsockopt(os.Socket(op.socket), os.SOL_SOCKET, os.SO_ERROR, &err, size_of(os.Errno))\n\t} else {\n\t\terr = os.connect(os.Socket(op.socket), (^os.SOCKADDR)(&op.sockaddr), 
i32(op.sockaddr.len))\n\t\tif err == os.EINPROGRESS {\n\t\t\tappend(&io.io_pending, completion)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err != os.ERROR_NONE {\n\t\tnet.close(op.socket)\n\t\top.callback(completion.user_data, {}, net._dial_error())\n\t} else {\n\t\top.callback(completion.user_data, op.socket, nil)\n\t}\n\n\tpool_put(&io.completion_pool, completion)\n}\n\ndo_read :: proc(io: ^IO, completion: ^Completion, op: ^Op_Read) {\n\tread: int\n\terr: os.Errno\n\t//odinfmt:disable\n\tswitch {\n\tcase op.offset >= 0: read, err = os.read_at(op.fd, op.buf, i64(op.offset))\n\tcase:                read, err = os.read(op.fd, op.buf)\n\t}\n\t//odinfmt:enable\n\n\top.read += read\n\n\tif err != os.ERROR_NONE {\n\t\tif err == os.EWOULDBLOCK {\n\t\t\tappend(&io.io_pending, completion)\n\t\t\treturn\n\t\t}\n\n\t\top.callback(completion.user_data, op.read, err)\n\t\tpool_put(&io.completion_pool, completion)\n\t\treturn\n\t}\n\n\tif op.all && op.read < op.len {\n\t\top.buf = op.buf[read:]\n\n\t\tif op.offset >= 0 {\n\t\t\top.offset += read\n\t\t}\n\n\t\tdo_read(io, completion, op)\n\t\treturn\n\t}\n\n\top.callback(completion.user_data, op.read, os.ERROR_NONE)\n\tpool_put(&io.completion_pool, completion)\n}\n\ndo_recv :: proc(io: ^IO, completion: ^Completion, op: ^Op_Recv) {\n\treceived: int\n\terr: net.Network_Error\n\tremote_endpoint: Maybe(net.Endpoint)\n\tswitch sock in op.socket {\n\tcase net.TCP_Socket:\n\t\treceived, err = net.recv_tcp(sock, op.buf)\n\n\t\tif err == net.TCP_Recv_Error.Would_Block {\n\t\t\tappend(&io.io_pending, completion)\n\t\t\treturn\n\t\t}\n\tcase net.UDP_Socket:\n\t\treceived, remote_endpoint, err = net.recv_udp(sock, op.buf)\n\n\t\tif err == net.UDP_Recv_Error.Would_Block {\n\t\t\tappend(&io.io_pending, completion)\n\t\t\treturn\n\t\t}\n\t}\n\n\top.received += received\n\n\tif err != nil {\n\t\top.callback(completion.user_data, op.received, remote_endpoint, err)\n\t\tpool_put(&io.completion_pool, completion)\n\t\treturn\n\t}\n\n\tif op.all && 
op.received < op.len {\n\t\top.buf = op.buf[received:]\n\t\tdo_recv(io, completion, op)\n\t\treturn\n\t}\n\n\top.callback(completion.user_data, op.received, remote_endpoint, err)\n\tpool_put(&io.completion_pool, completion)\n}\n\ndo_send :: proc(io: ^IO, completion: ^Completion, op: ^Op_Send) {\n\tsent:  u32\n\terrno: os.Errno\n\terr:   net.Network_Error\n\n\tswitch sock in op.socket {\n\tcase net.TCP_Socket:\n\t\tsent, errno = os.send(os.Socket(sock), op.buf, 0)\n\t\tif errno != nil {\n\t\t\terr = net._tcp_send_error()\n\t\t}\n\n\tcase net.UDP_Socket:\n\t\ttoaddr := _endpoint_to_sockaddr(op.endpoint.(net.Endpoint))\n\t\tsent, errno = os.sendto(os.Socket(sock), op.buf, 0, cast(^os.SOCKADDR)&toaddr, i32(toaddr.len))\n\t\tif errno != nil {\n\t\t\terr = net._udp_send_error()\n\t\t}\n\t}\n\n\top.sent += int(sent)\n\n\tif errno != os.ERROR_NONE {\n\t\tif errno == os.EWOULDBLOCK {\n\t\t\tappend(&io.io_pending, completion)\n\t\t\treturn\n\t\t}\n\n\t\top.callback(completion.user_data, op.sent, err)\n\t\tpool_put(&io.completion_pool, completion)\n\t\treturn\n\t}\n\n\tif op.all && op.sent < op.len {\n\t\top.buf = op.buf[sent:]\n\t\tdo_send(io, completion, op)\n\t\treturn\n\t}\n\n\top.callback(completion.user_data, op.sent, nil)\n\tpool_put(&io.completion_pool, completion)\n}\n\ndo_write :: proc(io: ^IO, completion: ^Completion, op: ^Op_Write) {\n\twritten: int\n\terr: os.Errno\n\t//odinfmt:disable\n\tswitch {\n\tcase op.offset >= 0: written, err = os.write_at(op.fd, op.buf, i64(op.offset))\n\tcase:                written, err = os.write(op.fd, op.buf)\n\t}\n\t//odinfmt:enable\n\n\top.written += written\n\n\tif err != os.ERROR_NONE {\n\t\tif err == os.EWOULDBLOCK {\n\t\t\tappend(&io.io_pending, completion)\n\t\t\treturn\n\t\t}\n\n\t\top.callback(completion.user_data, op.written, err)\n\t\tpool_put(&io.completion_pool, completion)\n\t\treturn\n\t}\n\n\t// The write did not write the whole buffer, need to write more.\n\tif op.all && op.written < op.len {\n\t\top.buf = 
op.buf[written:]\n\n\t\t// Increase offset so we don't overwrite what we just wrote.\n\t\tif op.offset >= 0 {\n\t\t\top.offset += written\n\t\t}\n\n\t\tdo_write(io, completion, op)\n\t\treturn\n\t}\n\n\top.callback(completion.user_data, op.written, os.ERROR_NONE)\n\tpool_put(&io.completion_pool, completion)\n}\n\ndo_timeout :: proc(io: ^IO, completion: ^Completion, op: ^Op_Timeout) {\n\top.callback(completion.user_data)\n\tpool_put(&io.completion_pool, completion)\n}\n\ndo_poll :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll) {\n\top.callback(completion.user_data, op.event)\n\tif !op.multi {\n\t\tpool_put(&io.completion_pool, completion)\n\t}\n}\n\ndo_poll_remove :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll_Remove) {\n\tpool_put(&io.completion_pool, completion)\n}\n\ndo_next_tick :: proc(io: ^IO, completion: ^Completion, op: ^Op_Next_Tick) {\n\top.callback(completion.user_data)\n\tpool_put(&io.completion_pool, completion)\n}\n\nkq_err_to_os_err :: proc(err: posix.Errno) -> os.Errno {\n\treturn os.Platform_Error(err)\n}\n\nev_err_to_os_err :: proc(err: posix.Errno) -> os.Errno {\n\treturn os.Platform_Error(err)\n}\n\n// Private proc in net package, verbatim copy.\n_endpoint_to_sockaddr :: proc(ep: net.Endpoint) -> (sockaddr: os.SOCKADDR_STORAGE_LH) {\n\tswitch a in ep.address {\n\tcase net.IP4_Address:\n\t\t(^os.sockaddr_in)(&sockaddr)^ = os.sockaddr_in {\n\t\t\tsin_port   = u16be(ep.port),\n\t\t\tsin_addr   = transmute(os.in_addr)a,\n\t\t\tsin_family = u8(os.AF_INET),\n\t\t\tsin_len    = size_of(os.sockaddr_in),\n\t\t}\n\t\treturn\n\tcase net.IP6_Address:\n\t\t(^os.sockaddr_in6)(&sockaddr)^ = os.sockaddr_in6 {\n\t\t\tsin6_port   = u16be(ep.port),\n\t\t\tsin6_addr   = transmute(os.in6_addr)a,\n\t\t\tsin6_family = u8(os.AF_INET6),\n\t\t\tsin6_len    = size_of(os.sockaddr_in6),\n\t\t}\n\t\treturn\n\t}\n\tunreachable()\n}\n"
  },
  {
    "path": "old_nbio/nbio_internal_linux.odin",
    "content": "#+private\npackage nbio\n\nimport \"base:runtime\"\n\nimport \"core:c\"\nimport \"core:container/queue\"\nimport \"core:fmt\"\nimport \"core:mem\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:sys/linux\"\n\nimport io_uring \"_io_uring\"\n\nNANOSECONDS_PER_SECOND :: 1e+9\n\n_IO :: struct {\n\tring:            io_uring.IO_Uring,\n\tcompletion_pool: Pool(Completion),\n\t// Ready to be submitted to kernel.\n\tunqueued:        queue.Queue(^Completion),\n\t// Ready to run callbacks.\n\tcompleted:       queue.Queue(^Completion),\n\tios_queued:      u64,\n\tios_in_kernel:   u64,\n\tallocator:       mem.Allocator,\n}\n\n_Completion :: struct {\n\tresult:    i32,\n\toperation: Operation,\n\tctx:       runtime.Context,\n}\n\nOp_Accept :: struct {\n\tcallback:    On_Accept,\n\tsocket:      net.TCP_Socket,\n\tsockaddr:    os.SOCKADDR_STORAGE_LH,\n\tsockaddrlen: c.int,\n}\n\nOp_Close :: struct {\n\tcallback: On_Close,\n\tfd:       os.Handle,\n}\n\nOp_Connect :: struct {\n\tcallback: On_Connect,\n\tsocket:   net.TCP_Socket,\n\tsockaddr: os.SOCKADDR_STORAGE_LH,\n}\n\nOp_Read :: struct {\n\tcallback: On_Read,\n\tfd:       os.Handle,\n\tbuf:      []byte,\n\toffset:   int,\n\tall:      bool,\n\tread:     int,\n\tlen:      int,\n}\n\nOp_Write :: struct {\n\tcallback: On_Write,\n\tfd:       os.Handle,\n\tbuf:      []byte,\n\toffset:   int,\n\tall:      bool,\n\twritten:  int,\n\tlen:      int,\n}\n\nOp_Send :: struct {\n\tcallback: On_Sent,\n\tsocket:   net.Any_Socket,\n\tbuf:      []byte,\n\tlen:      int,\n\tsent:     int,\n\tall:      bool,\n}\n\nOp_Recv :: struct {\n\tcallback: On_Recv,\n\tsocket:   net.Any_Socket,\n\tbuf:      []byte,\n\tall:      bool,\n\treceived: int,\n\tlen:      int,\n}\n\nOp_Timeout :: struct {\n\tcallback: On_Timeout,\n\texpires:  linux.Time_Spec,\n}\n\nOp_Next_Tick :: struct {\n\tcallback: On_Next_Tick,\n}\n\nOp_Poll :: struct {\n\tcallback: On_Poll,\n\tfd:       os.Handle,\n\tevent:    Poll_Event,\n\tmulti:    
bool,\n}\n\nOp_Poll_Remove :: struct {\n\tfd:    os.Handle,\n\tevent: Poll_Event,\n}\n\nflush :: proc(io: ^IO, wait_nr: u32, timeouts: ^uint, etime: ^bool) -> os.Errno {\n\terr := flush_submissions(io, wait_nr, timeouts, etime)\n\tif err != os.ERROR_NONE { return err }\n\n\terr = flush_completions(io, 0, timeouts, etime)\n\tif err != os.ERROR_NONE { return err }\n\n\t// Store length at this time, so we don't infinite loop if any of the enqueue\n\t// procs below then add to the queue again.\n\tn := queue.len(io.unqueued)\n\n\t// odinfmt: disable\n\tfor _ in 0..<n {\n\t\tunqueued := queue.pop_front(&io.unqueued)\n\t\tswitch &op in unqueued.operation {\n\t\tcase Op_Accept:      accept_enqueue     (io, unqueued, &op)\n\t\tcase Op_Close:       close_enqueue      (io, unqueued, &op)\n\t\tcase Op_Connect:     connect_enqueue    (io, unqueued, &op)\n\t\tcase Op_Read:        read_enqueue       (io, unqueued, &op)\n\t\tcase Op_Recv:        recv_enqueue       (io, unqueued, &op)\n\t\tcase Op_Send:        send_enqueue       (io, unqueued, &op)\n\t\tcase Op_Write:       write_enqueue      (io, unqueued, &op)\n\t\tcase Op_Timeout:     timeout_enqueue    (io, unqueued, &op)\n\t\tcase Op_Poll:        poll_enqueue       (io, unqueued, &op)\n\t\tcase Op_Poll_Remove: poll_remove_enqueue(io, unqueued, &op)\n\t\tcase Op_Next_Tick:   unreachable()\n\t\t}\n\t}\n\n\tn = queue.len(io.completed)\n\tfor _ in 0 ..< n {\n\t\tcompleted := queue.pop_front(&io.completed)\n\t\tcontext = completed.ctx\n\n\t\tswitch &op in completed.operation {\n\t\tcase Op_Accept:      accept_callback     (io, completed, &op)\n\t\tcase Op_Close:       close_callback      (io, completed, &op)\n\t\tcase Op_Connect:     connect_callback    (io, completed, &op)\n\t\tcase Op_Read:        read_callback       (io, completed, &op)\n\t\tcase Op_Recv:        recv_callback       (io, completed, &op)\n\t\tcase Op_Send:        send_callback       (io, completed, &op)\n\t\tcase Op_Write:       write_callback      (io, completed, 
&op)\n\t\tcase Op_Timeout:     timeout_callback    (io, completed, &op)\n\t\tcase Op_Poll:        poll_callback       (io, completed, &op)\n\t\tcase Op_Poll_Remove: poll_remove_callback(io, completed, &op)\n\t\tcase Op_Next_Tick:   next_tick_callback  (io, completed, &op)\n\t\tcase: unreachable()\n\t\t}\n\t}\n\t// odinfmt: enable\n\n\treturn os.ERROR_NONE\n}\n\nflush_completions :: proc(io: ^IO, wait_nr: u32, timeouts: ^uint, etime: ^bool) -> os.Errno {\n\tcqes: [256]io_uring.io_uring_cqe\n\twait_remaining := wait_nr\n\tfor {\n\t\tcompleted, err := io_uring.copy_cqes(&io.ring, cqes[:], wait_remaining)\n\t\tif err != .None { return ring_err_to_os_err(err) }\n\n\t\tif wait_remaining < completed {\n\t\t\twait_remaining = 0\n\t\t} else {\n\t\t\twait_remaining -= completed\n\t\t}\n\n\t\tif completed > 0 {\n\t\t\tqueue.reserve(&io.completed, int(completed))\n\t\t\tfor cqe in cqes[:completed] {\n\t\t\t\tio.ios_in_kernel -= 1\n\n\t\t\t\tif cqe.user_data == 0 {\n\t\t\t\t\ttimeouts^ -= 1\n\n\t\t\t\t\tif (-cqe.res == i32(os.ETIME)) {\n\t\t\t\t\t\tetime^ = true\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcompletion := cast(^Completion)uintptr(cqe.user_data)\n\t\t\t\tcompletion.result = cqe.res\n\n\t\t\t\tqueue.push_back(&io.completed, completion)\n\t\t\t}\n\t\t}\n\n\t\tif completed < len(cqes) { break }\n\t}\n\n\treturn os.ERROR_NONE\n}\n\nflush_submissions :: proc(io: ^IO, wait_nr: u32, timeouts: ^uint, etime: ^bool) -> os.Errno {\n\tfor {\n\t\tsubmitted, err := io_uring.submit(&io.ring, wait_nr)\n\t\t#partial switch err {\n\t\tcase .None:\n\t\t\tbreak\n\t\tcase .Signal_Interrupt:\n\t\t\tcontinue\n\t\tcase .Completion_Queue_Overcommitted, .System_Resources:\n\t\t\tferr := flush_completions(io, 1, timeouts, etime)\n\t\t\tif ferr != os.ERROR_NONE { return ferr }\n\t\t\tcontinue\n\t\tcase:\n\t\t\treturn ring_err_to_os_err(err)\n\t\t}\n\n\t\tio.ios_queued -= u64(submitted)\n\t\tio.ios_in_kernel += u64(submitted)\n\t\tbreak\n\t}\n\n\treturn 
os.ERROR_NONE\n}\n\naccept_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Accept) {\n\t_, err := io_uring.accept(\n\t\t&io.ring,\n\t\tu64(uintptr(completion)),\n\t\tos.Socket(op.socket),\n\t\tcast(^os.SOCKADDR)&op.sockaddr,\n\t\t&op.sockaddrlen,\n\t)\n\tif err == .Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\n\tio.ios_queued += 1\n}\n\naccept_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Accept) {\n\tif completion.result < 0 {\n\t\terrno := os.Platform_Error(-completion.result)\n\t\t#partial switch errno {\n\t\tcase .EINTR, .EWOULDBLOCK:\n\t\t\taccept_enqueue(io, completion, op)\n\t\tcase:\n\t\t\top.callback(completion.user_data, 0, {}, net._accept_error(errno))\n\t\t\tpool_put(&io.completion_pool, completion)\n\t\t}\n\t\treturn\n\t}\n\n\tclient := net.TCP_Socket(completion.result)\n\terr    := _prepare_socket(client)\n\tsource := sockaddr_storage_to_endpoint(&op.sockaddr)\n\n\top.callback(completion.user_data, client, source, err)\n\tpool_put(&io.completion_pool, completion)\n}\n\nclose_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Close) {\n\t_, err := io_uring.close(&io.ring, u64(uintptr(completion)), op.fd)\n\tif err == .Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\n\tio.ios_queued += 1\n}\n\nclose_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Close) {\n\terrno := os.Platform_Error(-completion.result)\n\n\t// In particular close() should not be retried after an EINTR\n\t// since this may cause a reused descriptor from another thread to be closed.\n\top.callback(completion.user_data, errno == .NONE || errno == .EINTR)\n\tpool_put(&io.completion_pool, completion)\n}\n\nconnect_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Connect) {\n\t_, err := io_uring.connect(\n\t\t&io.ring,\n\t\tu64(uintptr(completion)),\n\t\tos.Socket(op.socket),\n\t\tcast(^os.SOCKADDR)&op.sockaddr,\n\t\tsize_of(op.sockaddr),\n\t)\n\tif err == 
.Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\n\tio.ios_queued += 1\n}\n\nconnect_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Connect) {\n\terrno := os.Platform_Error(-completion.result)\n\t#partial switch errno {\n\tcase .EINTR, .EWOULDBLOCK:\n\t\tconnect_enqueue(io, completion, op)\n\t\treturn\n\tcase .NONE:\n\t\top.callback(completion.user_data, op.socket, nil)\n\tcase:\n\t\tnet.close(op.socket)\n\t\top.callback(completion.user_data, {}, net._dial_error(errno))\n\t}\n\tpool_put(&io.completion_pool, completion)\n}\n\nread_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Read) {\n\t// Max tells linux to use the file cursor as the offset.\n\toffset := max(u64) if op.offset < 0 else u64(op.offset)\n\n\t_, err := io_uring.read(&io.ring, u64(uintptr(completion)), op.fd, op.buf, offset)\n\tif err == .Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\n\tio.ios_queued += 1\n}\n\nread_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Read) {\n\tif completion.result < 0 {\n\t\terrno := os.Platform_Error(-completion.result)\n\t\t#partial switch errno {\n\t\tcase .EINTR, .EWOULDBLOCK:\n\t\t\tread_enqueue(io, completion, op)\n\t\tcase:\n\t\t\top.callback(completion.user_data, op.read, errno)\n\t\t\tpool_put(&io.completion_pool, completion)\n\t\t}\n\t\treturn\n\t}\n\n\top.read += int(completion.result)\n\n\tif op.all && op.read < op.len {\n\t\top.buf = op.buf[completion.result:]\n\t\tread_enqueue(io, completion, op)\n\t\treturn\n\t}\n\n\top.callback(completion.user_data, op.read, os.ERROR_NONE)\n\tpool_put(&io.completion_pool, completion)\n}\n\nrecv_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Recv) {\n\ttcpsock, ok := op.socket.(net.TCP_Socket)\n\tif !ok {\n\t\t// TODO: figure out and implement.\n\t\tunimplemented(\"UDP recv is unimplemented for linux nbio\")\n\t}\n\n\t_, err := io_uring.recv(&io.ring, u64(uintptr(completion)), os.Socket(tcpsock), 
op.buf, 0)\n\tif err == .Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\t// TODO: handle other errors, also in other enqueue procs.\n\n\tio.ios_queued += 1\n}\n\nrecv_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Recv) {\n\tif completion.result < 0 {\n\t\terrno := os.Platform_Error(-completion.result)\n\t\t#partial switch errno {\n\t\tcase .EINTR, .EWOULDBLOCK:\n\t\t\trecv_enqueue(io, completion, op)\n\t\tcase:\n\t\t\top.callback(completion.user_data, op.received, {}, net._tcp_recv_error(errno))\n\t\t\tpool_put(&io.completion_pool, completion)\n\t\t}\n\t\treturn\n\t}\n\n\top.received += int(completion.result)\n\n\tif op.all && op.received < op.len {\n\t\top.buf = op.buf[completion.result:]\n\t\trecv_enqueue(io, completion, op)\n\t\treturn\n\t}\n\n\top.callback(completion.user_data, op.received, {}, nil)\n\tpool_put(&io.completion_pool, completion)\n}\n\nsend_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Send) {\n\ttcpsock, ok := op.socket.(net.TCP_Socket)\n\tif !ok {\n\t\t// TODO: figure out and implement.\n\t\tunimplemented(\"UDP send is unimplemented for linux nbio\")\n\t}\n\n\t_, err := io_uring.send(&io.ring, u64(uintptr(completion)), os.Socket(tcpsock), op.buf, 0)\n\tif err == .Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\n\tio.ios_queued += 1\n}\n\nsend_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Send) {\n\tif completion.result < 0 {\n\t\terrno := os.Platform_Error(-completion.result)\n\t\t#partial switch errno {\n\t\tcase .EINTR, .EWOULDBLOCK:\n\t\t\tsend_enqueue(io, completion, op)\n\t\tcase:\n\t\t\top.callback(completion.user_data, op.sent, net._tcp_send_error(errno))\n\t\t\tpool_put(&io.completion_pool, completion)\n\t\t}\n\t\treturn\n\t}\n\n\top.sent += int(completion.result)\n\n\tif op.all && op.sent < op.len {\n\t\top.buf = op.buf[completion.result:]\n\t\tsend_enqueue(io, completion, 
op)\n\t\treturn\n\t}\n\n\top.callback(completion.user_data, op.sent, nil)\n\tpool_put(&io.completion_pool, completion)\n}\n\nwrite_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Write) {\n\t// Max tells linux to use the file cursor as the offset.\n\toffset := max(u64) if op.offset < 0 else u64(op.offset)\n\n\t_, err := io_uring.write(&io.ring, u64(uintptr(completion)), op.fd, op.buf, offset)\n\tif err == .Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\n\tio.ios_queued += 1\n}\n\nwrite_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Write) {\n\tif completion.result < 0 {\n\t\terrno := os.Platform_Error(-completion.result)\n\t\t#partial switch errno {\n\t\tcase .EINTR, .EWOULDBLOCK:\n\t\t\twrite_enqueue(io, completion, op)\n\t\tcase:\n\t\t\top.callback(completion.user_data, op.written, errno)\n\t\t\tpool_put(&io.completion_pool, completion)\n\t\t}\n\t\treturn\n\t}\n\n\top.written += int(completion.result)\n\n\tif op.all && op.written < op.len {\n\t\top.buf = op.buf[completion.result:]\n\n\t\tif op.offset >= 0 {\n\t\t\top.offset += int(completion.result)\n\t\t}\n\n\t\twrite_enqueue(io, completion, op)\n\t\treturn\n\t}\n\n\top.callback(completion.user_data, op.written, os.ERROR_NONE)\n\tpool_put(&io.completion_pool, completion)\n}\n\ntimeout_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Timeout) {\n\t_, err := io_uring.timeout(&io.ring, u64(uintptr(completion)), &op.expires, 0, 0)\n\tif err == .Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\n\tio.ios_queued += 1\n}\n\ntimeout_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Timeout) {\n\tif completion.result < 0 {\n\t\terrno := os.Platform_Error(-completion.result)\n\t\t#partial switch errno {\n\t\tcase .ETIME: // OK.\n\t\tcase .EINTR, .EWOULDBLOCK:\n\t\t\ttimeout_enqueue(io, completion, op)\n\t\t\treturn\n\t\tcase:\n\t\t\tfmt.panicf(\"timeout error: %v\", 
errno)\n\t\t}\n\t}\n\n\top.callback(completion.user_data)\n\tpool_put(&io.completion_pool, completion)\n}\n\nnext_tick_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Next_Tick) {\n\top.callback(completion.user_data)\n\tpool_put(&io.completion_pool, completion)\n}\n\npoll_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll) {\n\tevents: linux.Fd_Poll_Events\n\tswitch op.event {\n\tcase .Read:  events = linux.Fd_Poll_Events{.IN}\n\tcase .Write: events = linux.Fd_Poll_Events{.OUT}\n\t}\n\n\tflags: io_uring.IORing_Poll_Flags\n\tif op.multi {\n\t\tflags = io_uring.IORing_Poll_Flags{.ADD_MULTI}\n\t}\n\n\t_, err := io_uring.poll_add(&io.ring, u64(uintptr(completion)), op.fd, events, flags)\n\tif err == .Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\n\tio.ios_queued += 1\n}\n\npoll_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll) {\n\top.callback(completion.user_data, op.event)\n\tif !op.multi {\n\t\tpool_put(&io.completion_pool, completion)\n\t}\n}\n\npoll_remove_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll_Remove) {\n\tevents: linux.Fd_Poll_Events\n\tswitch op.event {\n\tcase .Read:  events = linux.Fd_Poll_Events{.IN}\n\tcase .Write: events = linux.Fd_Poll_Events{.OUT}\n\t}\n\n\t_, err := io_uring.poll_remove(&io.ring, u64(uintptr(completion)), op.fd, events)\n\tif err == .Submission_Queue_Full {\n\t\tqueue.push_back(&io.unqueued, completion)\n\t\treturn\n\t}\n\n\tio.ios_queued += 1\n}\n\npoll_remove_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll_Remove) {\n\tpool_put(&io.completion_pool, completion)\n}\n\nring_err_to_os_err :: proc(err: io_uring.IO_Uring_Error) -> os.Errno {\n\tswitch err {\n\tcase .None:\n\t\treturn os.ERROR_NONE\n\tcase .Params_Outside_Accessible_Address_Space, .Buffer_Invalid, .File_Descriptor_Invalid, .Submission_Queue_Entry_Invalid, .Ring_Shutting_Down:\n\t\treturn os.EFAULT\n\tcase .Arguments_Invalid, .Entries_Zero, 
.Entries_Too_Large, .Entries_Not_Power_Of_Two, .Opcode_Not_Supported:\n\t\treturn os.EINVAL\n\tcase .Process_Fd_Quota_Exceeded:\n\t\treturn os.EMFILE\n\tcase .System_Fd_Quota_Exceeded:\n\t\treturn os.ENFILE\n\tcase .System_Resources, .Completion_Queue_Overcommitted:\n\t\treturn os.ENOMEM\n\tcase .Permission_Denied:\n\t\treturn os.EPERM\n\tcase .System_Outdated:\n\t\treturn os.ENOSYS\n\tcase .Submission_Queue_Full:\n\t\treturn os.EOVERFLOW\n\tcase .Signal_Interrupt:\n\t\treturn os.EINTR\n\tcase .Unexpected:\n\t\tfallthrough\n\tcase:\n\t\treturn os.Platform_Error(-1)\n\t}\n}\n\n// verbatim copy of net._sockaddr_storage_to_endpoint.\nsockaddr_storage_to_endpoint :: proc(native_addr: ^os.SOCKADDR_STORAGE_LH) -> (ep: net.Endpoint) {\n\tswitch native_addr.ss_family {\n\tcase u16(os.AF_INET):\n\t\taddr := cast(^os.sockaddr_in)native_addr\n\t\tport := int(addr.sin_port)\n\t\tep = net.Endpoint {\n\t\t\taddress = net.IP4_Address(transmute([4]byte)addr.sin_addr),\n\t\t\tport    = port,\n\t\t}\n\tcase u16(os.AF_INET6):\n\t\taddr := cast(^os.sockaddr_in6)native_addr\n\t\tport := int(addr.sin6_port)\n\t\tep = net.Endpoint {\n\t\t\taddress = net.IP6_Address(transmute([8]u16be)addr.sin6_addr),\n\t\t\tport    = port,\n\t\t}\n\tcase:\n\t\tpanic(\"native_addr is neither IP4 or IP6 address\")\n\t}\n\treturn\n}\n\n// verbatim copy of net._endpoint_to_sockaddr.\nendpoint_to_sockaddr :: proc(ep: net.Endpoint) -> (sockaddr: os.SOCKADDR_STORAGE_LH) {\n\tswitch a in ep.address {\n\tcase net.IP4_Address:\n\t\t(^os.sockaddr_in)(&sockaddr)^ = os.sockaddr_in {\n\t\t\tsin_family = u16(os.AF_INET),\n\t\t\tsin_port   = u16be(ep.port),\n\t\t\tsin_addr   = transmute(os.in_addr)a,\n\t\t}\n\t\treturn\n\tcase net.IP6_Address:\n\t\t(^os.sockaddr_in6)(&sockaddr)^ = os.sockaddr_in6 {\n\t\t\tsin6_family = u16(os.AF_INET6),\n\t\t\tsin6_port   = u16be(ep.port),\n\t\t\tsin6_addr   = transmute(os.in6_addr)a,\n\t\t}\n\t\treturn\n\t}\n\tunreachable()\n}\n"
  },
  {
    "path": "old_nbio/nbio_internal_windows.odin",
    "content": "#+private\npackage nbio\n\nimport \"base:runtime\"\n\nimport \"core:container/queue\"\nimport \"core:log\"\nimport \"core:mem\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:time\"\n\nimport win \"core:sys/windows\"\n\n_IO :: struct {\n\tiocp:            win.HANDLE,\n\tallocator:       mem.Allocator,\n\ttimeouts:        [dynamic]^Completion,\n\tcompleted:       queue.Queue(^Completion),\n\tcompletion_pool: Pool(Completion),\n\tio_pending:      int,\n\t// The asynchronous Windows API's don't support reading at the current offset of a file, so we keep track ourselves.\n\toffsets:         map[os.Handle]u32,\n}\n\n_Completion :: struct {\n\tover: win.OVERLAPPED,\n\tctx:  runtime.Context,\n\top:   Operation,\n}\n#assert(offset_of(Completion, over) == 0, \"needs to be the first field to work\")\n\nOp_Accept :: struct {\n\tcallback: On_Accept,\n\tsocket:   win.SOCKET,\n\tclient:   win.SOCKET,\n\taddr:     win.SOCKADDR_STORAGE_LH,\n\tpending:  bool,\n}\n\nOp_Connect :: struct {\n\tcallback: On_Connect,\n\tsocket:   win.SOCKET,\n\taddr:     win.SOCKADDR_STORAGE_LH,\n\tpending:  bool,\n}\n\nOp_Close :: struct {\n\tcallback: On_Close,\n\tfd:       Closable,\n}\n\nOp_Read :: struct {\n\tcallback: On_Read,\n\tfd:       os.Handle,\n\toffset:   int,\n\tbuf:      []byte,\n\tpending:  bool,\n\tall:      bool,\n\tread:     int,\n\tlen:      int,\n}\n\nOp_Write :: struct {\n\tcallback: On_Write,\n\tfd:       os.Handle,\n\toffset:   int,\n\tbuf:      []byte,\n\tpending:  bool,\n\n\twritten:  int,\n\tlen:      int,\n\tall:      bool,\n}\n\nOp_Recv :: struct {\n\tcallback: On_Recv,\n\tsocket:   net.Any_Socket,\n\tbuf:      win.WSABUF,\n\tpending:  bool,\n\tall:      bool,\n\treceived: int,\n\tlen:      int,\n}\n\nOp_Send :: struct {\n\tcallback: On_Sent,\n\tsocket:   net.Any_Socket,\n\tbuf:      win.WSABUF,\n\tpending:  bool,\n\n\tlen:      int,\n\tsent:     int,\n\tall:      bool,\n}\n\nOp_Timeout :: struct {\n\tcallback: On_Timeout,\n\texpires:  
time.Time,\n}\n\nOp_Next_Tick :: struct {}\n\nOp_Poll :: struct {}\n\nOp_Poll_Remove :: struct {}\n\nflush_timeouts :: proc(io: ^IO) -> (expires: Maybe(time.Duration)) {\n\tcurr: time.Time\n\ttimeout_len := len(io.timeouts)\n\n\t// PERF: could use a faster clock, is getting time since program start fast?\n\tif timeout_len > 0 { curr = time.now() }\n\n\tfor i := 0; i < timeout_len; {\n\t\tcompletion := io.timeouts[i]\n\t\top := &completion.op.(Op_Timeout)\n\t\tcexpires := time.diff(curr, op.expires)\n\n\t\t// Timeout done.\n\t\tif (cexpires <= 0) {\n\t\t\tordered_remove(&io.timeouts, i)\n\t\t\tqueue.push_back(&io.completed, completion)\n\t\t\ttimeout_len -= 1\n\t\t\tcontinue\n\t\t}\n\n\t\t// Update minimum timeout.\n\t\texp, ok := expires.?\n\t\texpires = min(exp, cexpires) if ok else cexpires\n\n\t\ti += 1\n\t}\n\treturn\n}\n\nprepare_socket :: proc(io: ^IO, socket: net.Any_Socket) -> net.Network_Error {\n\tnet.set_option(socket, .Reuse_Address, true) or_return\n\tnet.set_option(socket, .TCP_Nodelay, true) or_return\n\n\thandle := win.HANDLE(uintptr(net.any_socket_to_socket(socket)))\n\n\thandle_iocp := win.CreateIoCompletionPort(handle, io.iocp, 0, 0)\n\tassert(handle_iocp == io.iocp)\n\n\tmode: byte\n\tmode |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS\n\tmode |= FILE_SKIP_SET_EVENT_ON_HANDLE\n\tif !win.SetFileCompletionNotificationModes(handle, mode) {\n\t\treturn net._socket_option_error()\n\t}\n\n\treturn nil\n}\n\nsubmit :: proc(io: ^IO, user: rawptr, op: Operation) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.op = op\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion\n}\n\nhandle_completion :: proc(io: ^IO, completion: ^Completion) {\n\tswitch &op in completion.op {\n\tcase Op_Accept:\n\t\t// TODO: we should directly call the accept callback here, no need for it to be on the Op_Acccept struct.\n\t\tsource, err := accept_callback(io, completion, 
&op)\n\t\tif wsa_err_incomplete(err) {\n\t\t\tio.io_pending += 1\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil { win.closesocket(op.client) }\n\n\t\top.callback(completion.user_data, net.TCP_Socket(op.client), source, err)\n\n\tcase Op_Connect:\n\t\terr := connect_callback(io, completion, &op)\n\t\tif wsa_err_incomplete(err) {\n\t\t\tio.io_pending += 1\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil { win.closesocket(op.socket) }\n\n\t\top.callback(completion.user_data, net.TCP_Socket(op.socket), err)\n\n\tcase Op_Close:\n\t\top.callback(completion.user_data, close_callback(io, op))\n\n\tcase Op_Read:\n\t\tread, err := read_callback(io, completion, &op)\n\t\tif err_incomplete(err) {\n\t\t\tio.io_pending += 1\n\t\t\treturn\n\t\t}\n\n\t\tif err == win.ERROR_HANDLE_EOF {\n\t\t\terr = win.NO_ERROR\n\t\t}\n\n\t\top.read += int(read)\n\n\t\tif err != win.NO_ERROR {\n\t\t\top.callback(completion.user_data, op.read, os.Platform_Error(err))\n\t\t} else if op.all && op.read < op.len {\n\t\t\top.buf = op.buf[read:]\n\n\t\t\tif op.offset >= 0 {\n\t\t\t\top.offset += int(read)\n\t\t\t}\n\n\t\t\top.pending = false\n\n\t\t\thandle_completion(io, completion)\n\t\t\treturn\n\t\t} else {\n\t\t\top.callback(completion.user_data, op.read, os.ERROR_NONE)\n\t\t}\n\n\tcase Op_Write:\n\t\twritten, err := write_callback(io, completion, &op)\n\t\tif err_incomplete(err) {\n\t\t\tio.io_pending += 1\n\t\t\treturn\n\t\t}\n\n\t\top.written += int(written)\n\n\t\toerr := os.Platform_Error(err)\n\t\tif oerr != os.ERROR_NONE {\n\t\t\top.callback(completion.user_data, op.written, oerr)\n\t\t} else if op.all && op.written < op.len {\n\t\t\top.buf = op.buf[written:]\n\n\t\t\tif op.offset >= 0 {\n\t\t\t\top.offset += int(written)\n\t\t\t}\n\n\t\t\top.pending = false\n\n\t\t\thandle_completion(io, completion)\n\t\t\treturn\n\t\t} else {\n\t\t\top.callback(completion.user_data, op.written, os.ERROR_NONE)\n\t\t}\n\n\tcase Op_Recv:\n\t\treceived, err := recv_callback(io, completion, &op)\n\t\tif 
wsa_err_incomplete(err) {\n\t\t\tio.io_pending += 1\n\t\t\treturn\n\t\t}\n\n\t\top.received += int(received)\n\n\t\tif err != nil {\n\t\t\top.callback(completion.user_data, op.received, {}, err)\n\t\t} else if op.all && op.received < op.len {\n\t\t\top.buf = win.WSABUF{\n\t\t\t\tlen = op.buf.len - win.ULONG(received),\n\t\t\t\tbuf = (cast([^]byte)op.buf.buf)[received:],\n\t\t\t}\n\t\t\top.pending = false\n\n\t\t\thandle_completion(io, completion)\n\t\t\treturn\n\t\t} else {\n\t\t\top.callback(completion.user_data, op.received, {}, nil)\n\t\t}\n\n\tcase Op_Send:\n\t\tsent, err := send_callback(io, completion, &op)\n\t\tif wsa_err_incomplete(err) {\n\t\t\tio.io_pending += 1\n\t\t\treturn\n\t\t}\n\n\t\top.sent += int(sent)\n\n\t\tif err != nil {\n\t\t\top.callback(completion.user_data, op.sent, err)\n\t\t} else if op.all && op.sent < op.len {\n\t\t\top.buf = win.WSABUF{\n\t\t\t\tlen = op.buf.len - win.ULONG(sent),\n\t\t\t\tbuf = (cast([^]byte)op.buf.buf)[sent:],\n\t\t\t}\n\t\t\top.pending = false\n\n\t\t\thandle_completion(io, completion)\n\t\t\treturn\n\t\t} else {\n\t\t\top.callback(completion.user_data, op.sent, nil)\n\t\t}\n\n\tcase Op_Timeout:\n\t\top.callback(completion.user_data)\n\n\tcase Op_Next_Tick, Op_Poll, Op_Poll_Remove:\n\t\tunreachable()\n\n\t}\n\tpool_put(&io.completion_pool, completion)\n}\n\naccept_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Accept) -> (source: net.Endpoint, err: net.Network_Error) {\n\tok: win.BOOL\n\tif op.pending {\n\t\t// Get status update, we've already initiated the accept.\n\t\tflags: win.DWORD\n\t\ttransferred: win.DWORD\n\t\tok = win.WSAGetOverlappedResult(op.socket, &comp.over, &transferred, win.FALSE, &flags)\n\t} else {\n\t\top.pending = true\n\n\t\toclient: net.Any_Socket\n\t\toclient, err = open_socket(io, .IP4, .TCP)\n\n\t\tif err != nil { return }\n\n\t\top.client = win.SOCKET(net.any_socket_to_socket(oclient))\n\n\t\taccept_ex: LPFN_ACCEPTEX\n\t\tload_socket_fn(op.socket, win.WSAID_ACCEPTEX, 
&accept_ex)\n\n\t\t#assert(size_of(win.SOCKADDR_STORAGE_LH) >= size_of(win.sockaddr_in) + 16)\n\t\tbytes_read: win.DWORD\n\t\tok = accept_ex(\n\t\t\top.socket,\n\t\t\top.client,\n\t\t\t&op.addr,\n\t\t\t0,\n\t\t\tsize_of(win.sockaddr_in) + 16,\n\t\t\tsize_of(win.sockaddr_in) + 16,\n\t\t\t&bytes_read,\n\t\t\t&comp.over,\n\t\t)\n\t}\n\n\tif !ok {\n\t\terr = net._accept_error()\n\t\treturn\n\t}\n\n\t// enables getsockopt, setsockopt, getsockname, getpeername.\n\twin.setsockopt(op.client, win.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, nil, 0)\n\n\tsource = sockaddr_to_endpoint(&op.addr)\n\treturn\n}\n\nconnect_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Connect) -> (err: net.Network_Error) {\n\ttransferred: win.DWORD\n\tok: win.BOOL\n\tif op.pending {\n\t\tflags: win.DWORD\n\t\tok = win.WSAGetOverlappedResult(op.socket, &comp.over, &transferred, win.FALSE, &flags)\n\t} else {\n\t\top.pending = true\n\n\t\tosocket: net.Any_Socket\n\t\tosocket, err = open_socket(io, .IP4, .TCP)\n\n\t\tif err != nil { return }\n\n\t\top.socket = win.SOCKET(net.any_socket_to_socket(osocket))\n\n\t\tsockaddr := endpoint_to_sockaddr({net.IP4_Any, 0})\n\t\tres := win.bind(op.socket, &sockaddr, size_of(sockaddr))\n\t\tif res < 0 { return net._bind_error() }\n\n\t\tconnect_ex: LPFN_CONNECTEX\n\t\tload_socket_fn(op.socket, WSAID_CONNECTEX, &connect_ex)\n\t\t// TODO: size_of(win.sockaddr_in6) when ip6.\n\t\tok = connect_ex(op.socket, &op.addr, size_of(win.sockaddr_in) + 16, nil, 0, &transferred, &comp.over)\n\t}\n\tif !ok { return net._dial_error() }\n\n\t// enables getsockopt, setsockopt, getsockname, getpeername.\n\twin.setsockopt(op.socket, win.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, nil, 0)\n\treturn\n}\n\nclose_callback :: proc(io: ^IO, op: Op_Close) -> bool {\n\t// NOTE: This might cause problems if there is still IO queued/pending.\n\t// Is that our responsibility to check/keep track of?\n\t// Might want to call win.CancelloEx to cancel all pending operations first.\n\n\tswitch h in op.fd 
{\n\tcase os.Handle:\n\t\tdelete_key(&io.offsets, h)\n\t\treturn win.CloseHandle(win.HANDLE(h)) == true\n\tcase net.TCP_Socket:\n\t\treturn win.closesocket(win.SOCKET(h)) == win.NO_ERROR\n\tcase net.UDP_Socket:\n\t\treturn win.closesocket(win.SOCKET(h)) == win.NO_ERROR\n\tcase net.Socket:\n\t\treturn win.closesocket(win.SOCKET(h)) == win.NO_ERROR\n\tcase:\n\t\tunreachable()\n\t}\n}\n\nread_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Read) -> (read: win.DWORD, err: win.DWORD) {\n\tok: win.BOOL\n\tif op.pending {\n\t\tok = win.GetOverlappedResult(win.HANDLE(op.fd), &comp.over, &read, win.FALSE)\n\t} else {\n\t\tcomp.over.Offset = u32(op.offset) if op.offset >= 0 else io.offsets[op.fd]\n\t\tcomp.over.OffsetHigh = comp.over.Offset >> 32\n\n\t\tok = win.ReadFile(win.HANDLE(op.fd), raw_data(op.buf), win.DWORD(len(op.buf)), &read, &comp.over)\n\n\t\t// Not sure if this also happens with correctly set up handles some times.\n\t\tif ok { log.info(\"non-blocking read returned immediately, is the handle set up correctly?\") }\n\n\t\top.pending = true\n\t}\n\n\tif !ok { err = win.GetLastError() }\n\n\t// Increment offset if this was not a call with an offset set.\n\tif op.offset >= 0 {\n\t\tio.offsets[op.fd] += read\n\t}\n\n\treturn\n}\n\nwrite_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Write) -> (written: win.DWORD, err: win.DWORD) {\n\tok: win.BOOL\n\tif op.pending {\n\t\tok = win.GetOverlappedResult(win.HANDLE(op.fd), &comp.over, &written, win.FALSE)\n\t} else {\n\t\tcomp.over.Offset = u32(op.offset) if op.offset >= 0 else io.offsets[op.fd]\n\t\tcomp.over.OffsetHigh = comp.over.Offset >> 32\n\t\tok = win.WriteFile(win.HANDLE(op.fd), raw_data(op.buf), win.DWORD(len(op.buf)), &written, &comp.over)\n\n\t\t// Not sure if this also happens with correctly set up handles some times.\n\t\tif ok { log.debug(\"non-blocking write returned immediately, is the handle set up correctly?\") }\n\n\t\top.pending = true\n\t}\n\n\tif !ok { err = win.GetLastError() }\n\n\t// 
Increment offset if this was not a call with an offset set.\n\tif op.offset >= 0 {\n\t\tio.offsets[op.fd] += written\n\t}\n\n\treturn\n}\n\nrecv_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Recv) -> (received: win.DWORD, err: net.TCP_Recv_Error) {\n\tsock := win.SOCKET(net.any_socket_to_socket(op.socket))\n\tok: win.BOOL\n\tif op.pending {\n\t\tflags: win.DWORD\n\t\tok = win.WSAGetOverlappedResult(sock, &comp.over, &received, win.FALSE, &flags)\n\t} else {\n\t\tflags: win.DWORD\n\t\terr_code := win.WSARecv(sock, &op.buf, 1, &received, &flags, win.LPWSAOVERLAPPED(&comp.over), nil)\n\t\tok = err_code != win.SOCKET_ERROR\n\t\top.pending = true\n\t}\n\n\tif !ok { err = net._tcp_recv_error() }\n\treturn\n}\n\nsend_callback :: proc(io: ^IO, comp: ^Completion, op: ^Op_Send) -> (sent: win.DWORD, err: net.TCP_Send_Error) {\n\tsock := win.SOCKET(net.any_socket_to_socket(op.socket))\n\tok: win.BOOL\n\tif op.pending {\n\t\tflags: win.DWORD\n\t\tok = win.WSAGetOverlappedResult(sock, &comp.over, &sent, win.FALSE, &flags)\n\t} else {\n\t\terr_code := win.WSASend(sock, &op.buf, 1, &sent, 0, win.LPWSAOVERLAPPED(&comp.over), nil)\n\t\tok = err_code != win.SOCKET_ERROR\n\t\top.pending = true\n\t}\n\n\tif !ok { err = net._tcp_send_error() }\n\treturn\n}\n\nFILE_SKIP_COMPLETION_PORT_ON_SUCCESS :: 0x1\nFILE_SKIP_SET_EVENT_ON_HANDLE :: 0x2\n\nSO_UPDATE_ACCEPT_CONTEXT :: 28683\n\nWSAID_CONNECTEX :: win.GUID{0x25a207b9, 0xddf3, 0x4660, [8]win.BYTE{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}}\n\nLPFN_CONNECTEX :: #type proc \"stdcall\" (\n\tsocket: win.SOCKET,\n\taddr: ^win.SOCKADDR_STORAGE_LH,\n\tnamelen: win.c_int,\n\tsend_buf: win.PVOID,\n\tsend_data_len: win.DWORD,\n\tbytes_sent: win.LPDWORD,\n\toverlapped: win.LPOVERLAPPED,\n) -> win.BOOL\n\nLPFN_ACCEPTEX :: #type proc \"stdcall\" (\n\tlisten_sock: win.SOCKET,\n\taccept_sock: win.SOCKET,\n\taddr_buf: win.PVOID,\n\taddr_len: win.DWORD,\n\tlocal_addr_len: win.DWORD,\n\tremote_addr_len: win.DWORD,\n\tbytes_received: 
win.LPDWORD,\n\toverlapped: win.LPOVERLAPPED,\n) -> win.BOOL\n\nwsa_err_incomplete :: proc(err: $T) -> bool {\n\twhen T == net.Dial_Error {\n\t\tif err == .Already_Connecting {\n\t\t\treturn true\n\t\t}\n\t}\n\n\twhen T != net.Network_Error {\n\t\tif err == .Would_Block {\n\t\t\treturn true\n\t\t} else if err != .Unknown {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tlast := win.System_Error(net.last_platform_error())\n\t#partial switch last {\n\tcase .WSAEWOULDBLOCK, .IO_PENDING, .IO_INCOMPLETE, .WSAEALREADY: return true\n\tcase: return false\n\t}\n}\n\nerr_incomplete :: proc(err: win.DWORD) -> bool {\n\treturn err == win.ERROR_IO_PENDING\n}\n\n// Verbatim copy of private proc in core:net.\nsockaddr_to_endpoint :: proc(native_addr: ^win.SOCKADDR_STORAGE_LH) -> (ep: net.Endpoint) {\n\tswitch native_addr.ss_family {\n\tcase u16(win.AF_INET):\n\t\taddr := cast(^win.sockaddr_in)native_addr\n\t\tport := int(addr.sin_port)\n\t\tep = net.Endpoint {\n\t\t\taddress = net.IP4_Address(transmute([4]byte)addr.sin_addr),\n\t\t\tport    = port,\n\t\t}\n\tcase u16(win.AF_INET6):\n\t\taddr := cast(^win.sockaddr_in6)native_addr\n\t\tport := int(addr.sin6_port)\n\t\tep = net.Endpoint {\n\t\t\taddress = net.IP6_Address(transmute([8]u16be)addr.sin6_addr),\n\t\t\tport    = port,\n\t\t}\n\tcase:\n\t\tpanic(\"native_addr is neither IP4 or IP6 address\")\n\t}\n\treturn\n}\n\n// Verbatim copy of private proc in core:net.\nendpoint_to_sockaddr :: proc(ep: net.Endpoint) -> (sockaddr: win.SOCKADDR_STORAGE_LH) {\n\tswitch a in ep.address {\n\tcase net.IP4_Address:\n\t\t(^win.sockaddr_in)(&sockaddr)^ = win.sockaddr_in {\n\t\t\tsin_port   = u16be(win.USHORT(ep.port)),\n\t\t\tsin_addr   = transmute(win.in_addr)a,\n\t\t\tsin_family = u16(win.AF_INET),\n\t\t}\n\t\treturn\n\tcase net.IP6_Address:\n\t\t(^win.sockaddr_in6)(&sockaddr)^ = win.sockaddr_in6 {\n\t\t\tsin6_port   = u16be(win.USHORT(ep.port)),\n\t\t\tsin6_addr   = transmute(win.in6_addr)a,\n\t\t\tsin6_family = 
u16(win.AF_INET6),\n\t\t}\n\t\treturn\n\t}\n\tunreachable()\n}\n\n// TODO: loading this takes a overlapped parameter, maybe we can do this async?\nload_socket_fn :: proc(subject: win.SOCKET, guid: win.GUID, fn: ^$T) {\n\tguid := guid\n\tbytes: u32\n\trc := win.WSAIoctl(\n\t\tsubject,\n\t\twin.SIO_GET_EXTENSION_FUNCTION_POINTER,\n\t\t&guid,\n\t\tsize_of(guid),\n\t\tfn,\n\t\tsize_of(fn),\n\t\t&bytes,\n\t\tnil,\n\t\tnil,\n\t)\n\tassert(rc != win.SOCKET_ERROR)\n\tassert(bytes == size_of(fn^))\n}\n"
  },
  {
    "path": "old_nbio/nbio_linux.odin",
    "content": "package nbio\n\nimport \"core:container/queue\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:sys/linux\"\nimport \"core:time\"\n\nimport io_uring \"_io_uring\"\n\n_init :: proc(io: ^IO, alloc := context.allocator) -> (err: os.Errno) {\n\tflags: u32 = 0\n\tentries: u32 = 256\n\n\tio.allocator = alloc\n\n\tpool_init(&io.completion_pool, allocator = alloc)\n\n\tparams: io_uring.io_uring_params\n\n\t// Make read, write etc. increment and use the file cursor.\n\tparams.features |= io_uring.IORING_FEAT_RW_CUR_POS\n\n\tring, rerr := io_uring.io_uring_make(&params, entries, flags)\n\t#partial switch rerr {\n\tcase .None:\n\t\tio.ring = ring\n\t\tqueue.init(&io.unqueued, allocator = alloc)\n\t\tqueue.init(&io.completed, allocator = alloc)\n\tcase:\n\t\terr = ring_err_to_os_err(rerr)\n\t}\n\n\treturn\n}\n\n_num_waiting :: #force_inline proc(io: ^IO) -> int {\n\treturn io.completion_pool.num_waiting\n}\n\n_destroy :: proc(io: ^IO) {\n\tcontext.allocator = io.allocator\n\n\tqueue.destroy(&io.unqueued)\n\tqueue.destroy(&io.completed)\n\tpool_destroy(&io.completion_pool)\n\tio_uring.io_uring_destroy(&io.ring)\n}\n\n_tick :: proc(io: ^IO) -> os.Errno {\n\ttimeouts: uint = 0\n\tetime := false\n\n\tt: linux.Time_Spec\n\tt.time_nsec += uint(time.Millisecond * 10)\n\n\tfor !etime {\n\t\t// Queue the timeout, if there is an error, flush (cause its probably full) and try again.\n\t\tsqe, err := io_uring.timeout(&io.ring, 0, &t, 1, 0)\n\t\tif err != nil {\n\t\t\tif errno := flush_submissions(io, 0, &timeouts, &etime); errno != os.ERROR_NONE {\n\t\t\t\treturn errno\n\t\t\t}\n\n\t\t\tsqe, err = io_uring.timeout(&io.ring, 0, &t, 1, 0)\n\t\t}\n\t\tif err != nil { return ring_err_to_os_err(err) }\n\n\t\ttimeouts += 1\n\t\tio.ios_queued += 1\n\n\t\tferr := flush(io, 1, &timeouts, &etime)\n\t\tif ferr != os.ERROR_NONE { return ferr }\n\t}\n\n\tfor timeouts > 0 {\n\t\tfcerr := flush_completions(io, 0, &timeouts, &etime)\n\t\tif fcerr != os.ERROR_NONE { return fcerr 
}\n\t}\n\n\treturn os.ERROR_NONE\n}\n\n_listen :: proc(socket: net.TCP_Socket, backlog := 1000) -> net.Network_Error {\n\terrno := linux.listen(linux.Fd(socket), i32(backlog))\n\tif errno != nil {\n\t\treturn net._listen_error(errno)\n\t}\n\treturn nil\n}\n\n_accept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Accept {\n\t\tcallback    = callback,\n\t\tsocket      = socket,\n\t\tsockaddrlen = i32(size_of(os.SOCKADDR_STORAGE_LH)),\n\t}\n\n\taccept_enqueue(io, completion, &completion.operation.(Op_Accept))\n\treturn completion\n}\n\n_close :: proc(io: ^IO, fd: Closable, user: rawptr, callback: On_Close) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\n\thandle: os.Handle\n\t//odinfmt:disable\n\tswitch h in fd {\n\tcase net.TCP_Socket: handle = os.Handle(h)\n\tcase net.UDP_Socket: handle = os.Handle(h)\n\tcase net.Socket:     handle = os.Handle(h)\n\tcase os.Handle:      handle = h\n\t} //odinfmt:enable\n\n\tcompletion.operation = Op_Close {\n\t\tcallback = callback,\n\t\tfd       = handle,\n\t}\n\n\tclose_enqueue(io, completion, &completion.operation.(Op_Close))\n\treturn completion\n}\n\n_connect :: proc(io: ^IO, endpoint: net.Endpoint, user: rawptr, callback: On_Connect) -> (^Completion, net.Network_Error) {\n\tif endpoint.port == 0 {\n\t\treturn nil, net.Dial_Error.Port_Required\n\t}\n\n\tfamily := net.family_from_endpoint(endpoint)\n\tsock, err := net.create_socket(family, .TCP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif preperr := _prepare_socket(sock); err != nil {\n\t\tclose(io, net.any_socket_to_socket(sock))\n\t\treturn nil, preperr\n\t}\n\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Connect 
{\n\t\tcallback = callback,\n\t\tsocket   = sock.(net.TCP_Socket),\n\t\tsockaddr = endpoint_to_sockaddr(endpoint),\n\t}\n\n\tconnect_enqueue(io, completion, &completion.operation.(Op_Connect))\n\treturn completion, nil\n}\n\n_read :: proc(\n\tio: ^IO,\n\tfd: os.Handle,\n\toffset: Maybe(int),\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Read,\n\tall := false,\n) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Read {\n\t\tcallback = callback,\n\t\tfd       = fd,\n\t\tbuf      = buf,\n\t\toffset   = offset.? or_else -1,\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t}\n\n\tread_enqueue(io, completion, &completion.operation.(Op_Read))\n\treturn completion\n}\n\n_recv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv, all := false) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Recv {\n\t\tcallback = callback,\n\t\tsocket   = socket,\n\t\tbuf      = buf,\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t}\n\n\trecv_enqueue(io, completion, &completion.operation.(Op_Recv))\n\treturn completion\n}\n\n_send :: proc(\n\tio: ^IO,\n\tsocket: net.Any_Socket,\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Sent,\n\t_: Maybe(net.Endpoint) = nil,\n\tall := false,\n) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Send {\n\t\tcallback = callback,\n\t\tsocket   = socket,\n\t\tbuf      = buf,\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t}\n\n\tsend_enqueue(io, completion, &completion.operation.(Op_Send))\n\treturn completion\n}\n\n_write :: proc(\n\tio: ^IO,\n\tfd: os.Handle,\n\toffset: Maybe(int),\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Write,\n\tall := false,\n) -> ^Completion {\n\tcompletion := 
pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\tcompletion.operation = Op_Write {\n\t\tcallback = callback,\n\t\tfd       = fd,\n\t\tbuf      = buf,\n\t\toffset   = offset.? or_else -1,\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t}\n\n\twrite_enqueue(io, completion, &completion.operation.(Op_Write))\n\treturn completion\n}\n\n_timeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\n\tnsec := time.duration_nanoseconds(dur)\n\tcompletion.operation = Op_Timeout {\n\t\tcallback = callback,\n\t\texpires = linux.Time_Spec{\n\t\t\ttime_sec  = uint(nsec / NANOSECONDS_PER_SECOND),\n\t\t\ttime_nsec = uint(nsec % NANOSECONDS_PER_SECOND),\n\t\t},\n\t}\n\n\ttimeout_enqueue(io, completion, &completion.operation.(Op_Timeout))\n\treturn completion\n}\n\n_next_tick :: proc(io: ^IO, user: rawptr, callback: On_Next_Tick) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\n\tcompletion.operation = Op_Next_Tick {\n\t\tcallback = callback,\n\t}\n\n\tqueue.push_back(&io.completed, completion)\n\treturn completion\n}\n\n_poll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.user_data = user\n\n\tcompletion.operation = Op_Poll{\n\t\tcallback = callback,\n\t\tfd       = fd,\n\t\tevent    = event,\n\t\tmulti    = multi,\n\t}\n\n\tpoll_enqueue(io, completion, &completion.operation.(Op_Poll))\n\treturn completion\n}\n\n_poll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.ctx = context\n\tcompletion.operation = Op_Poll_Remove{\n\t\tfd    = fd,\n\t\tevent = 
event,\n\t}\n\n\tpoll_remove_enqueue(io, completion, &completion.operation.(Op_Poll_Remove))\n\treturn completion\n}\n"
  },
  {
    "path": "old_nbio/nbio_test.odin",
    "content": "package nbio\n\nimport \"core:fmt\"\nimport \"core:log\"\nimport \"core:mem\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:slice\"\nimport \"core:testing\"\nimport \"core:time\"\n\nexpect :: testing.expect\n\n@(test)\ntest_timeout :: proc(t: ^testing.T) {\n\tio: IO\n\n\tierr := init(&io)\n\texpect(t, ierr == os.ERROR_NONE, fmt.tprintf(\"nbio.init error: %v\", ierr))\n\n\tdefer destroy(&io)\n\n\ttimeout_fired: bool\n\n\ttimeout(&io, time.Millisecond * 10, &timeout_fired, proc(t_: rawptr) {\n\t\ttimeout_fired := cast(^bool)t_\n\t\ttimeout_fired^ = true\n\t})\n\n\tstart := time.now()\n\tfor {\n\t\tterr := tick(&io)\n\t\texpect(t, terr == os.ERROR_NONE, fmt.tprintf(\"nbio.tick error: %v\", terr))\n\n\t\tif time.since(start) > time.Millisecond * 11 {\n\t\t\texpect(t, timeout_fired, \"timeout did not run in time\")\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n@(test)\ntest_write_read_close :: proc(t: ^testing.T) {\n\ttrack: mem.Tracking_Allocator\n\tmem.tracking_allocator_init(&track, context.allocator)\n\tcontext.allocator = mem.tracking_allocator(&track)\n\tdefer mem.tracking_allocator_destroy(&track)\n\n\tdefer {\n\t\tfor _, leak in track.allocation_map {\n\t\t\tfmt.printf(\"%v leaked %v bytes\\n\", leak.location, leak.size)\n\t\t}\n\n\t\tfor bad_free in track.bad_free_array {\n\t\t\tfmt.printf(\"%v allocation %p was freed badly\\n\", bad_free.location, bad_free.memory)\n\t\t}\n\t}\n\n\t{\n\t\tTest_Ctx :: struct {\n\t\t\tt:         ^testing.T,\n\t\t\tio:        ^IO,\n\t\t\tdone:      bool,\n\t\t\tfd:        os.Handle,\n\t\t\twrite_buf: [20]byte,\n\t\t\tread_buf:  [20]byte,\n\t\t\twritten:   int,\n\t\t\tread:      int,\n\t\t}\n\n\t\tio: IO\n\t\tinit(&io)\n\t\tdefer destroy(&io)\n\n\t\ttctx := Test_Ctx {\n\t\t\twrite_buf = [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},\n\t\t\tread_buf = [20]byte{},\n\t\t}\n\t\ttctx.t = t\n\t\ttctx.io = &io\n\n\t\tpath := \"test_write_read_close\"\n\t\thandle, errno := 
open(\n\t\t\t&io,\n\t\t\tpath,\n\t\t\tos.O_RDWR | os.O_CREATE | os.O_TRUNC,\n\t\t\tos.S_IRUSR | os.S_IWUSR | os.S_IRGRP | os.S_IROTH when ODIN_OS != .Windows else 0,\n\t\t)\n\t\texpect(t, errno == os.ERROR_NONE, fmt.tprintf(\"open file error: %i\", errno))\n\t\tdefer close(&io, handle)\n\t\tdefer os.remove(path)\n\n\t\ttctx.fd = handle\n\n\t\twrite(&io, handle, tctx.write_buf[:], &tctx, write_callback)\n\n\t\tfor !tctx.done {\n\t\t\tterr := tick(&io)\n\t\t\texpect(t, terr == os.ERROR_NONE, fmt.tprintf(\"error ticking: %v\", terr))\n\t\t}\n\n\t\texpect(t, tctx.read == 20, \"expected to have read 20 bytes\")\n\t\texpect(t, tctx.written == 20, \"expected to have written 20 bytes\")\n\t\texpect(t, slice.equal(tctx.write_buf[:], tctx.read_buf[:]))\n\n\t\twrite_callback :: proc(ctx: rawptr, written: int, err: os.Errno) {\n\t\t\tctx := cast(^Test_Ctx)ctx\n\t\t\texpect(ctx.t, err == os.ERROR_NONE, fmt.tprintf(\"write error: %i\", err))\n\n\t\t\tctx.written = written\n\n\t\t\tread_at(ctx.io, ctx.fd, 0, ctx.read_buf[:], ctx, read_callback)\n\t\t}\n\n\t\tread_callback :: proc(ctx: rawptr, r: int, err: os.Errno) {\n\t\t\tctx := cast(^Test_Ctx)ctx\n\t\t\texpect(ctx.t, err == os.ERROR_NONE, fmt.tprintf(\"read error: %i\", err))\n\n\t\t\tctx.read = r\n\n\t\t\tclose(ctx.io, ctx.fd, ctx, close_callback)\n\t\t}\n\n\t\tclose_callback :: proc(ctx: rawptr, ok: bool) {\n\t\t\tctx := cast(^Test_Ctx)ctx\n\t\t\texpect(ctx.t, ok, \"close error\")\n\n\t\t\tctx.done = true\n\t\t}\n\t}\n}\n\n@(test)\ntest_client_and_server_send_recv :: proc(t: ^testing.T) {\n\ttrack: mem.Tracking_Allocator\n\tmem.tracking_allocator_init(&track, context.allocator)\n\tcontext.allocator = mem.tracking_allocator(&track)\n\tdefer mem.tracking_allocator_destroy(&track)\n\n\tdefer {\n\t\tfor _, leak in track.allocation_map {\n\t\t\tfmt.printf(\"%v leaked %v bytes\\n\", leak.location, leak.size)\n\t\t}\n\n\t\tfor bad_free in track.bad_free_array {\n\t\t\tfmt.printf(\"%v allocation %p was freed badly\\n\", 
bad_free.location, bad_free.memory)\n\t\t}\n\t}\n\n\t{\n\t\tTest_Ctx :: struct {\n\t\t\tt:             ^testing.T,\n\t\t\tio:            ^IO,\n\t\t\tsend_buf:      []byte,\n\t\t\trecv_buf:      []byte,\n\t\t\tsent:          int,\n\t\t\treceived:      int,\n\t\t\taccepted_sock: Maybe(net.TCP_Socket),\n\t\t\tdone:          bool,\n\t\t\tep:            net.Endpoint,\n\t\t}\n\n\t\tio: IO\n\t\tinit(&io)\n\t\tdefer destroy(&io)\n\n\t\ttctx := Test_Ctx {\n\t\t\tsend_buf = []byte{1, 0, 1, 0, 1, 0, 1, 0, 1, 0},\n\t\t\trecv_buf = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n\t\t}\n\t\ttctx.t = t\n\t\ttctx.io = &io\n\n\t\ttctx.ep = {\n\t\t\taddress = net.IP4_Loopback,\n\t\t\tport    = 3131,\n\t\t}\n\n\t\tserver, err := open_and_listen_tcp(&io, tctx.ep)\n\t\texpect(t, err == nil, fmt.tprintf(\"create socket error: %s\", err))\n\n\t\taccept(&io, server, &tctx, accept_callback)\n\n\t\tterr := tick(&io)\n\t\texpect(t, terr == os.ERROR_NONE, fmt.tprintf(\"tick error: %v\", terr))\n\n\t\tconnect(&io, tctx.ep, &tctx, connect_callback)\n\n\t\tfor !tctx.done {\n\t\t\tterr = tick(&io)\n\t\t\texpect(t, terr == os.ERROR_NONE, fmt.tprintf(\"tick error: %v\", terr))\n\t\t}\n\n\t\texpect(\n\t\t\tt,\n\t\t\tlen(tctx.send_buf) == int(tctx.sent),\n\t\t\tfmt.tprintf(\"expected sent to be length of buffer: %i != %i\", len(tctx.send_buf), tctx.sent),\n\t\t)\n\t\texpect(\n\t\t\tt,\n\t\t\tlen(tctx.recv_buf) == int(tctx.received),\n\t\t\tfmt.tprintf(\"expected recv to be length of buffer: %i != %i\", len(tctx.recv_buf), tctx.received),\n\t\t)\n\n\t\texpect(\n\t\t\tt,\n\t\t\tslice.equal(tctx.send_buf[:tctx.received], tctx.recv_buf),\n\t\t\tfmt.tprintf(\"send and received not the same: %v != %v\", tctx.send_buf[:tctx.received], tctx.recv_buf),\n\t\t)\n\n\t\tconnect_callback :: proc(ctx: rawptr, sock: net.TCP_Socket, err: net.Network_Error) {\n\t\t\tctx := cast(^Test_Ctx)ctx\n\n\t\t\t// I believe this is because we are connecting in the same tick as accepting\n\t\t\t// and it goes wrong, might actually be a 
bug though, can't find anything.\n\t\t\tif err != nil {\n\t\t\t\tlog.info(\"connect err, trying again\", err)\n\t\t\t\tconnect(ctx.io, ctx.ep, ctx, connect_callback)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsend(ctx.io, sock, ctx.send_buf, ctx, send_callback)\n\t\t}\n\n\t\tsend_callback :: proc(ctx: rawptr, res: int, err: net.Network_Error) {\n\t\t\tctx := cast(^Test_Ctx)ctx\n\t\t\texpect(ctx.t, err == nil, fmt.tprintf(\"send error: %i\", err))\n\n\t\t\tctx.sent = res\n\t\t}\n\n\t\taccept_callback :: proc(ctx: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {\n\t\t\tctx := cast(^Test_Ctx)ctx\n\t\t\texpect(ctx.t, err == nil, fmt.tprintf(\"accept error: %i\", err))\n\n\t\t\tctx.accepted_sock = client\n\n\t\t\trecv(ctx.io, client, ctx.recv_buf, ctx, recv_callback)\n\t\t}\n\n\t\trecv_callback :: proc(ctx: rawptr, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {\n\t\t\tctx := cast(^Test_Ctx)ctx\n\t\t\texpect(ctx.t, err == nil, fmt.tprintf(\"recv error: %i\", err))\n\n\t\t\tctx.received = received\n\t\t\tctx.done = true\n\t\t}\n\t}\n}\n\n@test\ntest_send_all :: proc(t: ^testing.T) {\n\tTest_Ctx :: struct {\n\t\tt:             ^testing.T,\n\t\tio:            ^IO,\n\t\tsend_buf:      []byte,\n\t\trecv_buf:      []byte,\n\t\tsent:          int,\n\t\treceived:      int,\n\t\taccepted_sock: Maybe(net.TCP_Socket),\n\t\tdone:          bool,\n\t\tep:            net.Endpoint,\n\t}\n\n\tio: IO\n\tinit(&io)\n\tdefer destroy(&io)\n\n\ttctx := Test_Ctx {\n\t\tsend_buf = make([]byte, mem.Megabyte * 50),\n\t\trecv_buf = make([]byte, mem.Megabyte * 60),\n\t}\n\tdefer delete(tctx.send_buf)\n\tdefer delete(tctx.recv_buf)\n\n\tslice.fill(tctx.send_buf, 1)\n\n\ttctx.t = t\n\ttctx.io = &io\n\n\ttctx.ep = {\n\t\taddress = net.IP4_Loopback,\n\t\tport    = 3132,\n\t}\n\n\tserver, err := open_and_listen_tcp(&io, tctx.ep)\n\texpect(t, err == nil, fmt.tprintf(\"create socket error: %s\", err))\n\n\tdefer close(&io, server)\n\tdefer close(&io, 
tctx.accepted_sock.?)\n\n\taccept(&io, server, &tctx, accept_callback)\n\n\tterr := tick(&io)\n\texpect(t, terr == os.ERROR_NONE, fmt.tprintf(\"tick error: %v\", terr))\n\n\tconnect(&io, tctx.ep, &tctx, connect_callback)\n\n\tfor !tctx.done {\n\t\tterr = tick(&io)\n\t\texpect(t, terr == os.ERROR_NONE, fmt.tprintf(\"tick error: %v\", terr))\n\t}\n\n\texpect(t, slice.simple_equal(tctx.send_buf, tctx.recv_buf[:mem.Megabyte * 50]), \"expected the sent bytes to be the same as the received\")\n\n\texpected := make([]byte, mem.Megabyte * 10)\n\tdefer delete(expected)\n\texpect(t, slice.simple_equal(tctx.recv_buf[mem.Megabyte * 50:], expected), \"expected the rest of the bytes to be 0\")\n\n\tconnect_callback :: proc(ctx: rawptr, sock: net.TCP_Socket, err: net.Network_Error) {\n\t\tctx := cast(^Test_Ctx)ctx\n\t\tsend_all(ctx.io, sock, ctx.send_buf, ctx, send_callback)\n\t}\n\n\tsend_callback :: proc(ctx: rawptr, res: int, err: net.Network_Error) {\n\t\tctx := cast(^Test_Ctx)ctx\n\t\tif !expect(ctx.t, err == nil, fmt.tprintf(\"send error: %i\", err)) {\n\t\t\tctx.done = true\n\t\t}\n\n\t\tctx.sent = res\n\t}\n\n\taccept_callback :: proc(ctx: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {\n\t\tctx := cast(^Test_Ctx)ctx\n\t\tif !expect(ctx.t, err == nil, fmt.tprintf(\"accept error: %i\", err)) {\n\t\t\tctx.done = true\n\t\t}\n\n\t\tctx.accepted_sock = client\n\n\t\trecv(ctx.io, client, ctx.recv_buf, ctx, recv_callback)\n\t}\n\n\trecv_callback :: proc(ctx: rawptr, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {\n\t\tctx := cast(^Test_Ctx)ctx\n\t\tif !expect(ctx.t, err == nil, fmt.tprintf(\"recv error: %i\", err)) {\n\t\t\tctx.done = true\n\t\t}\n\n\t\tctx.received += received\n\t\tif ctx.received < mem.Megabyte * 50 {\n\t\t\trecv(ctx.io, ctx.accepted_sock.?, ctx.recv_buf[ctx.received:], ctx, recv_callback)\n\t\t\tlog.infof(\"received %.0M\", received)\n\t\t} else {\n\t\t\tctx.done = true\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "old_nbio/nbio_unix.odin",
    "content": "#+build darwin, linux\n#+private\npackage nbio\n\nimport \"core:net\"\nimport \"core:os\"\n\n_open :: proc(_: ^IO, path: string, mode, perm: int) -> (handle: os.Handle, errno: os.Errno) {\n\thandle, errno = os.open(path, mode, perm)\n\tif errno != os.ERROR_NONE { return }\n\n\terrno = _prepare_handle(handle)\n\tif errno != os.ERROR_NONE { os.close(handle) }\n\treturn\n}\n\n_seek :: proc(_: ^IO, fd: os.Handle, offset: int, whence: Whence) -> (int, os.Errno) {\n\tr, err := os.seek(fd, i64(offset), int(whence))\n\treturn int(r), err\n}\n\n_prepare_handle :: proc(fd: os.Handle) -> os.Errno {\n\t// NOTE: TCP_Socket gets cast to int right away in net, so this is safe to do.\n\tif err := net.set_blocking(net.TCP_Socket(fd), false); err != nil {\n\t\treturn os.Platform_Error((^i32)(&err)^)\n\t}\n\treturn os.ERROR_NONE\n}\n\n_open_socket :: proc(\n\t_: ^IO,\n\tfamily: net.Address_Family,\n\tprotocol: net.Socket_Protocol,\n) -> (\n\tsocket: net.Any_Socket,\n\terr: net.Network_Error,\n) {\n\tsocket, err = net.create_socket(family, protocol)\n\tif err != nil { return }\n\n\terr = _prepare_socket(socket)\n\tif err != nil { net.close(socket) }\n\treturn\n}\n\n_prepare_socket :: proc(socket: net.Any_Socket) -> net.Network_Error {\n\tnet.set_option(socket, .Reuse_Address, true) or_return\n\n\t// TODO; benchmark this, even if faster it is prob not to be turned on\n\t// by default here, maybe by default for the server, but I don't think this\n\t// will be faster/more efficient.\n\t// net.set_option(socket, .TCP_Nodelay, true) or_return\n\n\tnet.set_blocking(socket, false) or_return\n\treturn nil\n}\n"
  },
  {
    "path": "old_nbio/nbio_windows.odin",
    "content": "package nbio\n\nimport \"core:container/queue\"\nimport \"core:log\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:time\"\n\nimport win \"core:sys/windows\"\n\n_init :: proc(io: ^IO, allocator := context.allocator) -> (err: os.Errno) {\n\tio.allocator = allocator\n\n\tpool_init(&io.completion_pool, allocator = allocator)\n\tqueue.init(&io.completed, allocator = allocator)\n\tio.timeouts = make([dynamic]^Completion, allocator)\n\tio.offsets = make(map[os.Handle]u32, allocator = allocator)\n\n\twin.ensure_winsock_initialized()\n\tdefer if err != nil {\n\t\tassert(win.WSACleanup() == win.NO_ERROR)\n\t}\n\n\tio.iocp = win.CreateIoCompletionPort(win.INVALID_HANDLE_VALUE, nil, 0, 0)\n\tif io.iocp == nil {\n\t\terr = os.Platform_Error(win.GetLastError())\n\t\treturn\n\t}\n\n\treturn\n}\n\n_destroy :: proc(io: ^IO) {\n\tcontext.allocator = io.allocator\n\n\tdelete(io.timeouts)\n\tqueue.destroy(&io.completed)\n\tpool_destroy(&io.completion_pool)\n\tdelete(io.offsets)\n\n\t// TODO: error handling.\n\twin.CloseHandle(io.iocp)\n\t// win.WSACleanup()\n}\n\n_num_waiting :: #force_inline proc(io: ^IO) -> int {\n\treturn io.completion_pool.num_waiting\n}\n\n_tick :: proc(io: ^IO) -> (err: os.Errno) {\n\tif queue.len(io.completed) == 0 {\n\t\tnext_timeout := flush_timeouts(io)\n\n\t\t// Wait a maximum of a ms if there is nothing to do.\n\t\t// TODO: this is pretty naive, a typical server always has accept completions pending and will be at 100% cpu.\n\t\twait_ms: win.DWORD = 1 if io.io_pending == 0 else 0\n\n\t\t// But, to counter inaccuracies in low timeouts,\n\t\t// lets make the call exit immediately if the next timeout is close.\n\t\tif nt, ok := next_timeout.?; ok && nt <= time.Millisecond * 15 {\n\t\t\twait_ms = 0\n\t\t}\n\n\t\tevents: [256]win.OVERLAPPED_ENTRY\n\t\tentries_removed: win.ULONG\n\t\tif !win.GetQueuedCompletionStatusEx(io.iocp, &events[0], len(events), &entries_removed, wait_ms, false) {\n\t\t\tif terr := win.GetLastError(); terr != 
win.WAIT_TIMEOUT {\n\t\t\t\terr = os.Platform_Error(terr)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// assert(io.io_pending >= int(entries_removed))\n\t\tio.io_pending -= int(entries_removed)\n\n\t\tfor event in events[:entries_removed] {\n\t\t\tif event.lpOverlapped == nil {\n\t\t\t\t@static logged: bool\n\t\t\t\tif !logged {\n\t\t\t\t\tlog.warn(\"You have ran into a strange error some users have ran into on Windows 10 but I can't reproduce, I try to recover from the error but please chime in at https://github.com/laytan/odin-http/issues/34\")\n\t\t\t\t\tlogged = true\n\t\t\t\t}\n\n\t\t\t\tio.io_pending += 1\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// This is actually pointing at the Completion.over field, but because it is the first field\n\t\t\t// It is also a valid pointer to the Completion struct.\n\t\t\tcompletion := cast(^Completion)event.lpOverlapped\n\t\t\tqueue.push_back(&io.completed, completion)\n\t\t}\n\t}\n\n\t// Prevent infinite loop when callback adds to completed by storing length.\n\tn := queue.len(io.completed)\n\tfor _ in 0 ..< n {\n\t\tcompletion := queue.pop_front(&io.completed)\n\t\tcontext = completion.ctx\n\n\t\thandle_completion(io, completion)\n\t}\n\treturn\n}\n\n_listen :: proc(socket: net.TCP_Socket, backlog := 1000) -> (err: net.Network_Error) {\n\tif res := win.listen(win.SOCKET(socket), i32(backlog)); res == win.SOCKET_ERROR {\n\t\terr = net._listen_error()\n\t}\n\treturn\n}\n\n// Basically a copy of `os.open`, where a flag is added to signal async io, and creation of IOCP.\n// Specifically the FILE_FLAG_OVERLAPPEd flag.\n_open :: proc(io: ^IO, path: string, mode, perm: int) -> (os.Handle, os.Errno) {\n\tif len(path) == 0 {\n\t\treturn os.INVALID_HANDLE, os.ERROR_FILE_NOT_FOUND\n\t}\n\n\taccess: u32\n\t//odinfmt:disable\n\tswitch mode & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) {\n\tcase os.O_RDONLY: access = win.FILE_GENERIC_READ\n\tcase os.O_WRONLY: access = win.FILE_GENERIC_WRITE\n\tcase os.O_RDWR:   access = win.FILE_GENERIC_READ | 
win.FILE_GENERIC_WRITE\n\t}\n\t//odinfmt:enable\n\n\tif mode & os.O_CREATE != 0 {\n\t\taccess |= win.FILE_GENERIC_WRITE\n\t}\n\tif mode & os.O_APPEND != 0 {\n\t\taccess &~= win.FILE_GENERIC_WRITE\n\t\taccess |= win.FILE_APPEND_DATA\n\t}\n\n\tshare_mode := win.FILE_SHARE_READ | win.FILE_SHARE_WRITE\n\tsa: ^win.SECURITY_ATTRIBUTES = nil\n\tsa_inherit := win.SECURITY_ATTRIBUTES {\n\t\tnLength        = size_of(win.SECURITY_ATTRIBUTES),\n\t\tbInheritHandle = true,\n\t}\n\tif mode & os.O_CLOEXEC == 0 {\n\t\tsa = &sa_inherit\n\t}\n\n\tcreate_mode: u32\n\tswitch {\n\tcase mode & (os.O_CREATE | os.O_EXCL) == (os.O_CREATE | os.O_EXCL):\n\t\tcreate_mode = win.CREATE_NEW\n\tcase mode & (os.O_CREATE | os.O_TRUNC) == (os.O_CREATE | os.O_TRUNC):\n\t\tcreate_mode = win.CREATE_ALWAYS\n\tcase mode & os.O_CREATE == os.O_CREATE:\n\t\tcreate_mode = win.OPEN_ALWAYS\n\tcase mode & os.O_TRUNC == os.O_TRUNC:\n\t\tcreate_mode = win.TRUNCATE_EXISTING\n\tcase:\n\t\tcreate_mode = win.OPEN_EXISTING\n\t}\n\n\tflags := win.FILE_ATTRIBUTE_NORMAL | win.FILE_FLAG_BACKUP_SEMANTICS\n\n\t// This line is the only thing different from the `os.open` procedure.\n\t// This makes it an asynchronous file that can be used in nbio.\n\tflags |= win.FILE_FLAG_OVERLAPPED\n\n\twide_path := win.utf8_to_wstring(path)\n\thandle := os.Handle(win.CreateFileW(wide_path, access, share_mode, sa, create_mode, flags, nil))\n\n\tif handle == os.INVALID_HANDLE {\n\t\terr := os.Platform_Error(win.GetLastError())\n\t\treturn os.INVALID_HANDLE, err\n\t}\n\n\t// Everything past here is custom/not from `os.open`.\n\n\thandle_iocp := win.CreateIoCompletionPort(win.HANDLE(handle), io.iocp, 0, 0)\n\tassert(handle_iocp == io.iocp)\n\n\tcmode: byte\n\tcmode |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS\n\tcmode |= FILE_SKIP_SET_EVENT_ON_HANDLE\n\tif !win.SetFileCompletionNotificationModes(win.HANDLE(handle), cmode) {\n\t\twin.CloseHandle(win.HANDLE(handle))\n\t\treturn os.INVALID_HANDLE, os.Platform_Error(win.GetLastError())\n\t}\n\n\tif mode 
& os.O_APPEND != 0 {\n\t\t_seek(io, handle, 0, .End)\n\t}\n\n\treturn handle, os.ERROR_NONE\n}\n\n_seek :: proc(io: ^IO, fd: os.Handle, offset: int, whence: Whence) -> (int, os.Errno) {\n\tswitch whence {\n\tcase .Set:\n\t\tio.offsets[fd] = u32(offset)\n\tcase .Curr:\n\t\tio.offsets[fd] += u32(offset)\n\tcase .End:\n\t\tsize: win.LARGE_INTEGER\n\t\tok := win.GetFileSizeEx(win.HANDLE(fd), &size)\n\t\tif !ok {\n\t\t\treturn 0, os.Platform_Error(win.GetLastError())\n\t\t}\n\n\t\tio.offsets[fd] = u32(size) + u32(offset)\n\t}\n\n\treturn int(io.offsets[fd]), os.ERROR_NONE\n}\n\n_open_socket :: proc(\n\tio: ^IO,\n\tfamily: net.Address_Family,\n\tprotocol: net.Socket_Protocol,\n) -> (\n\tsocket: net.Any_Socket,\n\terr: net.Network_Error,\n) {\n\tsocket, err = net.create_socket(family, protocol)\n\tif err != nil { return }\n\n\terr = prepare_socket(io, socket)\n\tif err != nil { net.close(socket) }\n\treturn\n}\n\n_accept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) -> ^Completion {\n\treturn submit(\n\t\tio,\n\t\tuser,\n\t\tOp_Accept{\n\t\t\tcallback = callback,\n\t\t\tsocket   = win.SOCKET(socket),\n\t\t\tclient   = win.INVALID_SOCKET,\n\t\t},\n\t)\n}\n\n_connect :: proc(io: ^IO, ep: net.Endpoint, user: rawptr, callback: On_Connect) -> (^Completion, net.Network_Error) {\n\tif ep.port == 0 {\n\t\treturn nil, net.Dial_Error.Port_Required\n\t}\n\n\treturn submit(io, user, Op_Connect{\n\t\tcallback = callback,\n\t\taddr     = endpoint_to_sockaddr(ep),\n\t}), nil\n}\n\n_close :: proc(io: ^IO, fd: Closable, user: rawptr, callback: On_Close) -> ^Completion {\n\treturn submit(io, user, Op_Close{callback = callback, fd = fd})\n}\n\n_read :: proc(\n\tio: ^IO,\n\tfd: os.Handle,\n\toffset: Maybe(int),\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Read,\n\tall := false,\n) -> ^Completion {\n\treturn submit(io, user, Op_Read{\n\t\tcallback = callback,\n\t\tfd       = fd,\n\t\toffset   = offset.? 
or_else -1,\n\t\tbuf      = buf,\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t})\n}\n\n_write :: proc(\n\tio: ^IO,\n\tfd: os.Handle,\n\toffset: Maybe(int),\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Write,\n\tall := false,\n) -> ^Completion {\n\treturn submit(io, user, Op_Write{\n\t\tcallback = callback,\n\t\tfd       = fd,\n\t\toffset   = offset.? or_else -1,\n\t\tbuf      = buf,\n\n\t\tall      = all,\n\t\tlen      = len(buf),\n\t})\n}\n\n_recv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv, all := false) -> ^Completion {\n\t// TODO: implement UDP.\n\tif _, ok := socket.(net.UDP_Socket); ok { unimplemented(\"nbio.recv with UDP sockets is not yet implemented\") }\n\n\treturn submit(\n\t\tio,\n\t\tuser,\n\t\tOp_Recv{\n\t\t\tcallback = callback,\n\t\t\tsocket   = socket,\n\t\t\tbuf      = win.WSABUF{len = win.ULONG(len(buf)), buf = raw_data(buf)},\n\t\t\tall      = all,\n\t\t\tlen      = len(buf),\n\t\t},\n\t)\n}\n\n_send :: proc(\n\tio: ^IO,\n\tsocket: net.Any_Socket,\n\tbuf: []byte,\n\tuser: rawptr,\n\tcallback: On_Sent,\n\tendpoint: Maybe(net.Endpoint) = nil,\n\tall := false,\n) -> ^Completion {\n\t// TODO: implement UDP.\n\tif _, ok := socket.(net.UDP_Socket); ok { unimplemented(\"nbio.send with UDP sockets is not yet implemented\") }\n\n\treturn submit(\n\t\tio,\n\t\tuser,\n\t\tOp_Send{\n\t\t\tcallback = callback,\n\t\t\tsocket   = socket,\n\t\t\tbuf      = win.WSABUF{len = win.ULONG(len(buf)), buf = raw_data(buf)},\n\n\t\t\tall      = all,\n\t\t\tlen      = len(buf),\n\t\t},\n\t)\n}\n\n_timeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) -> ^Completion {\n\tcompletion := pool_get(&io.completion_pool)\n\n\tcompletion.op = Op_Timeout {\n\t\tcallback = callback,\n\t\texpires  = time.time_add(time.now(), dur),\n\t}\n\tcompletion.user_data = user\n\tcompletion.ctx = context\n\n\tappend(&io.timeouts, completion)\n\treturn completion\n}\n\n_next_tick :: proc(io: ^IO, user: rawptr, 
callback: On_Next_Tick) -> ^Completion {\n\tpanic(\"unimplemented on windows: next_tick\")\n}\n\n_poll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) -> ^Completion {\n\tpanic(\"unimplemented on windows: poll\")\n}\n\n_poll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) -> ^Completion {\n\tpanic(\"unimplemented on windows: poll_remove\")\n}\n"
  },
  {
    "path": "old_nbio/poly/poly.odin",
    "content": "// Package nbio/poly contains variants of the nbio procedures that use generic/poly data\n// so users can avoid casts and use multiple arguments.\n//\n// Please reference the documentation in `nbio`.\n//\n// Intention is to import this like so `import nbio \"nbio/poly\"`\npackage poly\n\nimport \"core:mem\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:time\"\n\nimport nbio \"..\"\n\n// Because mem is only used inside the poly procs, the checker thinks we aren't using it.\n_ :: mem\n\n/// Re-export `nbio` stuff that is not wrapped in this package.\n\nCompletion          :: nbio.Completion\nIO                  :: nbio.IO\ninit                :: nbio.init\ntick                :: nbio.tick\nnum_waiting         :: nbio.num_waiting\ndestroy             :: nbio.destroy\nopen_socket         :: nbio.open_socket\nopen_and_listen_tcp :: nbio.open_and_listen_tcp\nlisten              :: nbio.listen\nClosable            :: nbio.Closable\nopen                :: nbio.open\nWhence              :: nbio.Whence\nseek                :: nbio.seek\nPoll_Event          :: nbio.Poll_Event\npoll_remove         :: nbio.poll_remove\n\n/// Timeout\n\ntimeout :: proc {\n\ttimeout1,\n\ttimeout2,\n\ttimeout3,\n}\n\ntimeout1 :: proc(io: ^nbio.IO, dur: time.Duration, p: $T, callback: $C/proc(p: T))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._timeout(io, dur, nil, proc(completion: rawptr) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p)\n\t})\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\ntimeout2 :: proc(io: ^nbio.IO, dur: time.Duration, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := 
nbio._timeout(io, dur, nil, proc(completion: rawptr) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2)\n\t})\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\ntimeout3 :: proc(io: ^nbio.IO, dur: time.Duration, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._timeout(io, dur, nil, proc(completion: rawptr) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3)\n\t})\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\n/// Close\n\nclose :: proc {\n\tclose_no_cb,\n\tclose1,\n\tclose2,\n\tclose3,\n}\n\nclose_no_cb :: proc(io: ^nbio.IO, fd: nbio.Closable) {\n\tnbio.close(io, fd)\n}\n\nclose1 :: proc(io: ^nbio.IO, fd: nbio.Closable, p: $T, callback: $C/proc(p: T, ok: bool))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._close(io, fd, nil, proc(completion: rawptr, ok: bool) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb 
:= (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p, ok)\n\t})\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\nclose2 :: proc(io: ^nbio.IO, fd: nbio.Closable, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, ok: bool))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._close(io, fd, nil, proc(completion: rawptr, ok: bool) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2, ok)\n\t})\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\nclose3 :: proc(io: ^nbio.IO, fd: nbio.Closable, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, ok: bool))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._close(io, fd, nil, proc(completion: rawptr, ok: bool) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T3):]))^\n\n\t\tcb(p, p2, p3, ok)\n\t})\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = 
copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\n/// Accept\n\naccept :: proc {\n\taccept1,\n\taccept2,\n\taccept3,\n}\n\naccept1 :: proc(io: ^nbio.IO, socket: net.TCP_Socket, p: $T, callback: $C/proc(p: T, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._accept(io, socket, nil, proc(completion: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\t\tcb         := (^C)(&completion.user_args[0])^\n\t\tp          := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p, client, source, err)\n\t})\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\naccept2 :: proc(io: ^nbio.IO, socket: net.TCP_Socket, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._accept(io, socket, nil, proc(completion: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2, client, source, err)\n\t})\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\naccept3 :: proc(io: ^nbio.IO, socket: net.TCP_Socket, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, client: 
net.TCP_Socket, source: net.Endpoint, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._accept(io, socket, nil, proc(completion: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3, client, source, err)\n\t})\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\n/// Connect\n\nconnect :: proc {\n\tconnect1,\n\tconnect2,\n\tconnect3,\n}\n\nconnect1 :: proc(io: ^nbio.IO, endpoint: net.Endpoint, p: $T, callback: $C/proc(p: T, socket: net.TCP_Socket, err: net.Network_Error))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion, err := nbio._connect(io, endpoint, nil, proc(completion: rawptr, socket: net.TCP_Socket, err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p, socket, err)\n\t})\n\tif err != nil {\n\t\tcallback(p, {}, err)\n\t\treturn\n\t}\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\nconnect2 :: proc(io: ^nbio.IO, endpoint: net.Endpoint, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, socket: net.TCP_Socket, err: 
net.Network_Error))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion, err := nbio._connect(io, endpoint, nil, proc(completion: rawptr, socket: net.TCP_Socket, err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2, socket, err)\n\t})\n\tif err != nil {\n\t\tcallback(p, p2, {}, err)\n\t\treturn\n\t}\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\nconnect3 :: proc(io: ^nbio.IO, endpoint: net.Endpoint, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, socket: net.TCP_Socket, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion, err := nbio._connect(io, endpoint, nil, proc(completion: rawptr, socket: net.TCP_Socket, err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3, socket, err)\n\t})\n\tif err != nil {\n\t\tcallback(p, p2, p3, {}, err)\n\t\treturn\n\t}\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = 
completion\n}\n\n/// Internal Recv\n\n_recv :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, all: bool, p: $T, callback: $C/proc(p: T, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._recv(io, socket, buf, nil, proc(completion: rawptr, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p, received, udp_client, err)\n\t})\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\n_recv2 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, all: bool, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._recv(io, socket, buf, nil, proc(completion: rawptr, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2, received, udp_client, err)\n\t})\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\n_recv3 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, all: bool, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, received: int, udp_client: Maybe(net.Endpoint), err: 
net.Network_Error))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._recv(io, socket, buf, nil, proc(completion: rawptr, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3, received, udp_client, err)\n\t})\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\n/// Recv\n\nrecv :: proc {\n\trecv1,\n\trecv2,\n\trecv3,\n}\n\nrecv1 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, p: $T, callback: $C/proc(p: T, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_recv(io, socket, buf, false, p, callback)\n}\n\nrecv2 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_recv2(io, socket, buf, false, p, p2, callback)\n}\n\nrecv3 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t// The guard must reserve space for all three user args (was missing size_of(T3)).\n\t_recv3(io, socket, buf, false, p, p2, p3, callback)\n}\n\n/// Recv All\n\nrecv_all :: proc 
{\n\trecv_all1,\n\trecv_all2,\n\trecv_all3,\n}\n\nrecv_all1 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, p: $T, callback: $C/proc(p: T, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_recv(io, socket, buf, true, p, callback)\n}\n\nrecv_all2 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_recv2(io, socket, buf, true, p, p2, callback)\n}\n\nrecv_all3 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_recv3(io, socket, buf, true, p, p2, p3, callback)\n}\n\n/// Send\n\nsend :: proc {\n\tsend_tcp1,\n\tsend_tcp2,\n\tsend_tcp3,\n\tsend_udp1,\n\tsend_udp2,\n\tsend_udp3,\n}\n\n/// Send Internal\n\n_send :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, p: $T, callback: $C/proc(p: T, sent: int, err: net.Network_Error), endpoint: Maybe(net.Endpoint) = nil, all := false)\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._send(io, socket, buf, nil, proc(completion: rawptr, sent: int, err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p, sent, err)\n\t}, endpoint, all)\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\n_send2 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, sent: int, err: 
net.Network_Error), endpoint: Maybe(net.Endpoint) = nil, all := false)\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._send(io, socket, buf, nil, proc(completion: rawptr, sent: int, err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2, sent, err)\n\t}, endpoint, all)\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\n_send3 :: proc(io: ^nbio.IO, socket: net.Any_Socket, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, sent: int, err: net.Network_Error), endpoint: Maybe(net.Endpoint) = nil, all := false)\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._send(io, socket, buf, nil, proc(completion: rawptr, sent: int, err: net.Network_Error) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3, sent, err)\n\t}, endpoint, all)\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\n/// Send TCP\n\nsend_tcp1 :: proc(io: ^nbio.IO, socket: 
net.TCP_Socket, buf: []byte, p: $T, callback: $C/proc(p: T, sent: int, err: net.Network_Error))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_send(io, socket, buf, p, callback)\n}\n\nsend_tcp2 :: proc(io: ^nbio.IO, socket: net.TCP_Socket, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, sent: int, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_send2(io, socket, buf, p, p2, callback)\n}\n\nsend_tcp3 :: proc(io: ^nbio.IO, socket: net.TCP_Socket, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, sent: int, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_send3(io, socket, buf, p, p2, p3, callback)\n}\n\n/// Send UDP\n\nsend_udp1 :: proc(io: ^nbio.IO, endpoint: net.Endpoint, socket: net.UDP_Socket, buf: []byte, p: $T, callback: $C/proc(p: T, sent: int, err: net.Network_Error))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_send(io, socket, buf, p, callback, endpoint)\n}\n\nsend_udp2 :: proc(io: ^nbio.IO, endpoint: net.Endpoint, socket: net.UDP_Socket, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, sent: int, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_send2(io, socket, buf, p, p2, callback, endpoint)\n}\n\nsend_udp3 :: proc(io: ^nbio.IO, endpoint: net.Endpoint, socket: net.UDP_Socket, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, sent: int, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_send3(io, socket, buf, p, p2, p3, callback, endpoint)\n}\n\n/// Send All\n\nsend_all :: proc {\n\tsend_all_tcp1,\n\tsend_all_tcp2,\n\tsend_all_tcp3,\n\tsend_all_udp1,\n\tsend_all_udp2,\n\tsend_all_udp3,\n}\n\n/// Send All TCP\n\nsend_all_tcp1 :: proc(io: ^nbio.IO, socket: net.TCP_Socket, buf: []byte, p: $T, callback: $C/proc(p: T, sent: int, err: net.Network_Error))\n\twhere 
size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_send(io, socket, buf, p, callback, all = true)\n}\n\nsend_all_tcp2 :: proc(io: ^nbio.IO, socket: net.TCP_Socket, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, sent: int, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_send2(io, socket, buf, p, p2, callback, all = true)\n}\n\nsend_all_tcp3 :: proc(io: ^nbio.IO, socket: net.TCP_Socket, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, sent: int, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_send3(io, socket, buf, p, p2, p3, callback, all = true)\n}\n\n/// Send All UDP\n\nsend_all_udp1 :: proc(io: ^nbio.IO, endpoint: net.Endpoint, socket: net.UDP_Socket, buf: []byte, p: $T, callback: $C/proc(p: T, sent: int, err: net.Network_Error))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_send(io, socket, buf, p, callback, endpoint, all = true)\n}\n\nsend_all_udp2 :: proc(io: ^nbio.IO, endpoint: net.Endpoint, socket: net.UDP_Socket, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, sent: int, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_send2(io, socket, buf, p, p2, callback, endpoint, all = true)\n}\n\nsend_all_udp3 :: proc(io: ^nbio.IO, endpoint: net.Endpoint, socket: net.UDP_Socket, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, sent: int, err: net.Network_Error))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_send3(io, socket, buf, p, p2, p3, callback, endpoint, all = true)\n}\n\n/// Read Internal\n\n_read :: proc(io: ^nbio.IO, fd: os.Handle, offset: Maybe(int), buf: []byte, p: $T, callback: $C/proc(p: T, read: int, err: os.Errno), all := false)\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._read(io, fd, offset, buf, nil, proc(completion: rawptr, read: int, err: os.Errno) 
{\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p, read, err)\n\t}, all)\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\n_read2 :: proc(io: ^nbio.IO, fd: os.Handle, offset: Maybe(int), buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, read: int, err: os.Errno), all := false)\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._read(io, fd, offset, buf, nil, proc(completion: rawptr, read: int, err: os.Errno) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2, read, err)\n\t}, all)\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\n_read3 :: proc(io: ^nbio.IO, fd: os.Handle, offset: Maybe(int), buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, read: int, err: os.Errno), all := false)\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._read(io, fd, offset, buf, nil, proc(completion: rawptr, read: int, err: os.Errno) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3, 
read, err)\n\t}, all)\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\n/// Read\n\nread :: proc {\n\tread1,\n\tread2,\n\tread3,\n}\n\nread1 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, callback: $C/proc(p: T, read: int, err: os.Errno))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_read(io, fd, nil, buf, p, callback)\n}\n\nread2 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, read: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_read2(io, fd, nil, buf, p, p2, callback)\n}\n\nread3 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, read: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_read3(io, fd, nil, buf, p, p2, p3, callback)\n}\n\n/// Read All\n\nread_all :: proc {\n\tread_all1,\n\tread_all2,\n\tread_all3,\n}\n\nread_all1 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, callback: $C/proc(p: T, read: int, err: os.Errno))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_read(io, fd, nil, buf, p, callback, all = true)\n}\n\nread_all2 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, read: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_read2(io, fd, nil, buf, p, p2, callback, all = true)\n}\n\nread_all3 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, read: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_read3(io, fd, nil, buf, p, p2, 
p3, callback, all = true)\n}\n\n/// Read At\n\nread_at :: proc {\n\tread_at1,\n\tread_at2,\n\tread_at3,\n}\n\nread_at1 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, callback: $C/proc(p: T, read: int, err: os.Errno))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_read(io, fd, offset, buf, p, callback)\n}\n\nread_at2 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, read: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_read2(io, fd, offset, buf, p, p2, callback)\n}\n\nread_at3 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, read: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_read3(io, fd, offset, buf, p, p2, p3, callback)\n}\n\n/// Read At All\n\nread_at_all :: proc {\n\tread_at_all1,\n\tread_at_all2,\n\tread_at_all3,\n}\n\nread_at_all1 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, callback: $C/proc(p: T, read: int, err: os.Errno))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_read(io, fd, offset, buf, p, callback, all = true)\n}\n\nread_at_all2 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, read: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_read2(io, fd, offset, buf, p, p2, callback, all = true)\n}\n\nread_at_all3 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, read: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_read3(io, fd, offset, buf, p, p2, p3, callback, all = true)\n}\n\n/// Read Full / Entire File\n\nread_entire_file  :: read_full\n\nread_full :: proc {\n\tread_full1,\n\tread_full2,\n\tread_full3,\n}\n\nread_full1 :: proc(io: 
^nbio.IO, fd: os.Handle, p: $T, callback: $C/proc(p: T, buf: []byte, read: int, err: os.Errno), allocator := context.allocator)\n\twhere size_of(T) + size_of([]byte) <= nbio.MAX_USER_ARGUMENTS {\n\tsize, err := seek(io, fd, 0, .End)\n\tif err != os.ERROR_NONE {\n\t\tcallback(p, nil, 0, err)\n\t\treturn\n\t}\n\n\tif size <= 0 {\n\t\tcallback(p, nil, 0, os.ERROR_NONE)\n\t\treturn\n\t}\n\n\tbuf := make([]byte, size, allocator)\n\n\tcompletion := nbio._read(io, fd, 0, buf, nil, proc(completion: rawptr, read: int, err: os.Errno) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb  := (^C)     (&completion.user_args[0])^\n\t\tbuf := (^[]byte)(raw_data(completion.user_args[size_of(C):]))^\n\t\tp   := (^T)     (raw_data(completion.user_args[size_of(C) + size_of([]byte):]))^\n\n\t\tcb(p, buf, read, err)\n\t}, all = true)\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&buf))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\nread_full2 :: proc(io: ^nbio.IO, fd: os.Handle, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, buf: []byte, read: int, err: os.Errno), allocator := context.allocator)\n\twhere size_of(T) + size_of(T2) + size_of([]byte) <= nbio.MAX_USER_ARGUMENTS {\n\tsize, err := seek(io, fd, 0, .End)\n\tif err != os.ERROR_NONE {\n\t\tcallback(p, p2, nil, 0, err)\n\t\treturn\n\t}\n\n\tif size <= 0 {\n\t\tcallback(p, p2, nil, 0, os.ERROR_NONE)\n\t\treturn\n\t}\n\n\tbuf := make([]byte, size, allocator)\n\n\tcompletion := nbio._read(io, fd, 0, buf, nil, proc(completion: rawptr, read: int, err: os.Errno) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb  := (^C)     (&completion.user_args[0])^\n\t\tbuf := (^[]byte)(raw_data(completion.user_args[size_of(C):]))^\n\t\tp   := (^T)     (raw_data(completion.user_args[size_of(C) + size_of([]byte):]))^\n\t\tp2  := (^T2)    
(raw_data(completion.user_args[size_of(C) + size_of([]byte) + size_of(T):]))^\n\n\t\tcb(p, p2, buf, read, err)\n\t}, all = true)\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&buf))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\nread_full3 :: proc(io: ^nbio.IO, fd: os.Handle, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, buf: []byte, read: int, err: os.Errno), allocator := context.allocator)\n\twhere size_of(T) + size_of(T2) + size_of(T3) + size_of([]byte) <= nbio.MAX_USER_ARGUMENTS {\n\tsize, err := seek(io, fd, 0, .End)\n\tif err != os.ERROR_NONE {\n\t\tcallback(p, p2, p3, nil, 0, err)\n\t\treturn\n\t}\n\n\tif size <= 0 {\n\t\tcallback(p, p2, p3, nil, 0, os.ERROR_NONE)\n\t\treturn\n\t}\n\n\tbuf := make([]byte, size, allocator)\n\n\tcompletion := nbio._read(io, fd, 0, buf, nil, proc(completion: rawptr, read: int, err: os.Errno) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb  := (^C)     (&completion.user_args[0])^\n\t\tbuf := (^[]byte)(raw_data(completion.user_args[size_of(C):]))^\n\t\tp   := (^T)     (raw_data(completion.user_args[size_of(C) + size_of([]byte):]))^\n\t\tp2  := (^T2)    (raw_data(completion.user_args[size_of(C) + size_of([]byte) + size_of(T):]))^\n\t\tp3  := (^T3)    (raw_data(completion.user_args[size_of(C) + size_of([]byte) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3, buf, read, err)\n\t}, all = true)\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&buf))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], 
mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\n/// Write Internal\n\n_write :: proc(io: ^nbio.IO, fd: os.Handle, offset: Maybe(int), buf: []byte, p: $T, callback: $C/proc(p: T, written: int, err: os.Errno), all := false)\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._write(io, fd, offset, buf, nil, proc(completion: rawptr, written: int, err: os.Errno) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p, written, err)\n\t}, all)\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\n_write2 :: proc(io: ^nbio.IO, fd: os.Handle, offset: Maybe(int), buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, written: int, err: os.Errno), all := false)\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._write(io, fd, offset, buf, nil, proc(completion: rawptr, written: int, err: os.Errno) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2, written, err)\n\t}, all)\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\n_write3 :: proc(io: ^nbio.IO, fd: os.Handle, offset: Maybe(int), buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, written: int, err: os.Errno), all := false)\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._write(io, 
fd, offset, buf, nil, proc(completion: rawptr, written: int, err: os.Errno) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3, written, err)\n\t}, all)\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\n/// Write\n\nwrite :: proc {\n\twrite1,\n\twrite2,\n\twrite3,\n}\n\nwrite1 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, callback: $C/proc(p: T, written: int, err: os.Errno))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_write(io, fd, nil, buf, p, callback)\n}\n\nwrite2 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, written: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_write2(io, fd, nil, buf, p, p2, callback)\n}\n\nwrite3 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, written: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_write3(io, fd, nil, buf, p, p2, p3, callback)\n}\n\n/// Write All\n\nwrite_all :: proc {\n\twrite_all1,\n\twrite_all2,\n\twrite_all3,\n}\n\nwrite_all1 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, callback: $C/proc(p: T, written: int, err: os.Errno))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_write(io, fd, nil, buf, p, callback, all = true)\n}\n\nwrite_all2 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, 
p2: $T2, callback: $C/proc(p: T, p2: T2, written: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_write2(io, fd, nil, buf, p, p2, callback, all = true)\n}\n\nwrite_all3 :: proc(io: ^nbio.IO, fd: os.Handle, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, written: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_write3(io, fd, nil, buf, p, p2, p3, callback, all = true)\n}\n\n/// Write At\n\nwrite_at :: proc {\n\twrite_at1,\n\twrite_at2,\n\twrite_at3,\n}\n\nwrite_at1 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, callback: $C/proc(p: T, written: int, err: os.Errno))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_write(io, fd, offset, buf, p, callback)\n}\n\nwrite_at2 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, written: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_write2(io, fd, offset, buf, p, p2, callback)\n}\n\nwrite_at3 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, written: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_write3(io, fd, offset, buf, p, p2, p3, callback)\n}\n\n/// Write At All\n\nwrite_at_all :: proc {\n\twrite_at_all1,\n\twrite_at_all2,\n\twrite_at_all3,\n}\n\nwrite_at_all1 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, callback: $C/proc(p: T, written: int, err: os.Errno))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\t_write(io, fd, offset, buf, p, callback, all = true)\n}\n\nwrite_at_all2 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, written: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\t_write2(io, fd, offset, buf, p, p2, 
callback, all = true)\n}\n\nwrite_at_all3 :: proc(io: ^nbio.IO, fd: os.Handle, offset: int, buf: []byte, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, written: int, err: os.Errno))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\t_write3(io, fd, offset, buf, p, p2, p3, callback, all = true)\n}\n\nnext_tick1 :: proc(io: ^IO, p: $T, callback: $C/proc(p: T))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._next_tick(io, nil, proc(completion: rawptr) {\n\t\tcompletion := (^Completion)(completion)\n\n\t\tcb := (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p)\n\t})\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\nnext_tick2 :: proc(io: ^nbio.IO, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._next_tick(io, nil, proc(completion: rawptr) {\n\t\tcompletion := (^nbio.Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2)\n\t})\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\nnext_tick3 :: proc(io: ^IO, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._next_tick(io, nil, proc(completion: rawptr) {\n\t\tcompletion := (^Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) 
(raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3)\n\t})\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\nnext_tick :: proc {\n\tnext_tick1,\n\tnext_tick2,\n\tnext_tick3,\n}\n\npoll1 :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, p: $T, callback: $C/proc(p: T, event: Poll_Event))\n\twhere size_of(T) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._poll(io, fd, event, multi, nil, proc(completion: rawptr, event: Poll_Event) {\n\t\tcompletion := (^Completion)(completion)\n\n\t\tcb := (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\n\t\tcb(p, event)\n\t})\n\n\tcallback, p := callback, p\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\n\tcompletion.user_data = completion\n}\n\npoll2 :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, p: $T, p2: $T2, callback: $C/proc(p: T, p2: T2, event: Poll_Event))\n\twhere size_of(T) + size_of(T2) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._poll(io, fd, event, multi, nil, proc(completion: rawptr, event: Poll_Event) {\n\t\tcompletion := (^Completion)(completion)\n\n\t\tcb := (^C)(&completion.user_args[0])^\n\t\tp  := (^T)(raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\n\t\tcb(p, p2, event)\n\t})\n\n\tcallback, p, p2 := callback, p, p2\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += 
copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\n\tcompletion.user_data = completion\n}\n\npoll3 :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, p: $T, p2: $T2, p3: $T3, callback: $C/proc(p: T, p2: T2, p3: T3, event: Poll_Event))\n\twhere size_of(T) + size_of(T2) + size_of(T3) <= nbio.MAX_USER_ARGUMENTS {\n\tcompletion := nbio._poll(io, fd, event, multi, nil, proc(completion: rawptr, event: Poll_Event) {\n\t\tcompletion := (^Completion)(completion)\n\n\t\tcb := (^C) (&completion.user_args[0])^\n\t\tp  := (^T) (raw_data(completion.user_args[size_of(C):]))^\n\t\tp2 := (^T2)(raw_data(completion.user_args[size_of(C) + size_of(T):]))^\n\t\tp3 := (^T3)(raw_data(completion.user_args[size_of(C) + size_of(T) + size_of(T2):]))^\n\n\t\tcb(p, p2, p3, event)\n\t})\n\n\tcallback, p, p2, p3 := callback, p, p2, p3\n\tn := copy(completion.user_args[:],  mem.ptr_to_bytes(&callback))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p))\n\tn += copy(completion.user_args[n:], mem.ptr_to_bytes(&p2))\n\t_  = copy(completion.user_args[n:], mem.ptr_to_bytes(&p3))\n\n\tcompletion.user_data = completion\n}\n\npoll :: proc {\n\tpoll1,\n\tpoll2,\n\tpoll3,\n}\n"
  },
  {
    "path": "old_nbio/pool.odin",
    "content": "#+private\npackage nbio\n\nimport \"core:container/queue\"\nimport \"core:mem\"\nimport \"core:mem/virtual\"\n\n// An object pool where the objects are allocated on a growing arena.\nPool :: struct($T: typeid) {\n\tallocator:         mem.Allocator,\n\tarena:             virtual.Arena,\n\tobjects_allocator: mem.Allocator,\n\tobjects:           queue.Queue(^T),\n\tnum_waiting:       int,\n}\n\nDEFAULT_STARTING_CAP :: 8\n\npool_init :: proc(p: ^Pool($T), cap := DEFAULT_STARTING_CAP, allocator := context.allocator) -> mem.Allocator_Error {\n\tvirtual.arena_init_growing(&p.arena) or_return\n\tp.objects_allocator = virtual.arena_allocator(&p.arena)\n\n\tp.allocator = allocator\n\tqueue.init(&p.objects, cap, allocator) or_return\n\tfor _ in 0 ..< cap {\n\t\t_ = queue.push_back(&p.objects, new(T, p.objects_allocator)) or_return\n\t}\n\n\treturn nil\n}\n\npool_destroy :: proc(p: ^Pool($T)) {\n\tvirtual.arena_destroy(&p.arena)\n\tqueue.destroy(&p.objects)\n}\n\npool_get :: proc(p: ^Pool($T)) -> (^T, mem.Allocator_Error) #optional_allocator_error {\n\tp.num_waiting += 1\n\n\telem, ok := queue.pop_front_safe(&p.objects)\n\tif !ok {\n\t\treturn new(T, p.objects_allocator)\n\t}\n\n\tmem.zero_item(elem)\n\treturn elem, nil\n}\n\npool_put :: proc(p: ^Pool($T), elem: ^T) -> mem.Allocator_Error {\n\tp.num_waiting -= 1\n\n\t_, err := queue.push_back(&p.objects, elem)\n\treturn err\n}\n"
  },
  {
    "path": "openssl/.version",
    "content": "openssl-3.6.2\n"
  },
  {
    "path": "openssl/openssl.odin",
    "content": "package openssl\n\nimport \"core:c\"\nimport \"core:c/libc\"\n\nSHARED :: #config(OPENSSL_SHARED, false)\n\nwhen ODIN_OS == .Windows {\n\twhen SHARED {\n\t\tforeign import lib {\n\t\t\t\"./includes/windows/libssl.lib\",\n\t\t\t\"./includes/windows/libcrypto.lib\",\n\t\t}\n\t} else {\n\t\t// @(extra_linker_flags=\"/nodefaultlib:libcmt\")\n\t\tforeign import lib {\n\t\t\t\"./includes/windows/libssl_static.lib\",\n\t\t\t\"./includes/windows/libcrypto_static.lib\",\n\t\t\t\"system:ws2_32.lib\",\n\t\t\t\"system:gdi32.lib\",\n\t\t\t\"system:advapi32.lib\",\n\t\t\t\"system:crypt32.lib\",\n\t\t\t\"system:user32.lib\",\n\t\t}\n\t}\n} else when ODIN_OS == .Darwin {\n\tforeign import lib {\n\t\t\"system:ssl.3\",\n\t\t\"system:crypto.3\",\n\t}\n} else {\n\tforeign import lib {\n\t\t\"system:ssl\",\n\t\t\"system:crypto\",\n\t}\n}\n\nVersion :: bit_field u32 {\n\tpre_release: uint | 4,\n\tpatch:       uint | 16,\n\tminor:       uint | 8,\n\tmajor:       uint | 4,\n}\n\nVERSION: Version\n\n@(private, init)\nversion_check :: proc \"contextless\" () {\n\tVERSION = Version(OpenSSL_version_num())\n\tassert_contextless(VERSION.major == 3, \"invalid OpenSSL library version, expected 3.x\")\n}\n\nSSL_METHOD :: struct {}\nSSL_CTX :: struct {}\nSSL :: struct {}\n\nSSL_CTRL_SET_TLSEXT_HOSTNAME :: 55\n\nTLSEXT_NAMETYPE_host_name :: 0\n\nforeign lib {\n\tTLS_client_method :: proc() -> ^SSL_METHOD ---\n\tSSL_CTX_new :: proc(method: ^SSL_METHOD) -> ^SSL_CTX ---\n\tSSL_new :: proc(ctx: ^SSL_CTX) -> ^SSL ---\n\tSSL_set_fd :: proc(ssl: ^SSL, fd: c.int) -> c.int ---\n\tSSL_connect :: proc(ssl: ^SSL) -> c.int ---\n\tSSL_get_error :: proc(ssl: ^SSL, ret: c.int) -> c.int ---\n\tSSL_read :: proc(ssl: ^SSL, buf: [^]byte, num: c.int) -> c.int ---\n\tSSL_write :: proc(ssl: ^SSL, buf: [^]byte, num: c.int) -> c.int ---\n\tSSL_free :: proc(ssl: ^SSL) ---\n\tSSL_CTX_free :: proc(ctx: ^SSL_CTX) ---\n\tERR_print_errors_fp :: proc(fp: ^libc.FILE) ---\n\tSSL_ctrl :: proc(ssl: ^SSL, cmd: c.int, 
larg: c.long, parg: rawptr) -> c.long ---\n\tOpenSSL_version_num :: proc() -> c.ulong ---\n}\n\n// This is a macro in c land.\nSSL_set_tlsext_host_name :: proc(ssl: ^SSL, name: cstring) -> c.int {\n\treturn c.int(SSL_ctrl(ssl, SSL_CTRL_SET_TLSEXT_HOSTNAME, TLSEXT_NAMETYPE_host_name, rawptr(name)))\n}\n\nERR_print_errors :: proc {\n\tERR_print_errors_fp,\n\tERR_print_errors_stderr,\n}\n\nERR_print_errors_stderr :: proc() {\n\tERR_print_errors_fp(libc.stderr)\n}\n"
  },
  {
    "path": "request.odin",
    "content": "package http\n\nimport \"core:net\"\nimport \"core:strings\"\n\nRequest :: struct {\n\t// If in a handler, this is always there and never None.\n\t// TODO: we should not expose this as a maybe to package users.\n\tline:       Maybe(Requestline),\n\n\t// Is true if the request is actually a HEAD request,\n\t// line.method will be .Get if Server_Opts.redirect_head_to_get is set.\n\tis_head:    bool,\n\n\theaders:    Headers,\n\turl:        URL,\n\tclient:     net.Endpoint,\n\n\t// Route params/captures.\n\turl_params: []string,\n\n\t// Internal usage only.\n\t_scanner:   ^Scanner,\n\t_body_ok:   Maybe(bool),\n}\n\nrequest_init :: proc(r: ^Request, allocator := context.allocator) {\n\theaders_init(&r.headers, allocator)\n}\n\n// TODO: call it headers_sanitize because it modifies the headers.\n\n// Validates the headers of a request, from the pov of the server.\nheaders_validate_for_server :: proc(headers: ^Headers) -> bool {\n\t// RFC 7230 5.4: A server MUST respond with a 400 (Bad Request) status code to any\n\t// HTTP/1.1 request message that lacks a Host header field.\n\tif !headers_has_unsafe(headers^, \"host\") {\n\t\treturn false\n\t}\n\n\treturn headers_validate(headers)\n}\n\n// Validates the headers, use `headers_validate_for_server` if these are request headers\n// that should be validated from the server side.\nheaders_validate :: proc(headers: ^Headers) -> bool {\n\t// RFC 7230 3.3.3: If a Transfer-Encoding header field\n\t// is present in a request and the chunked transfer coding is not\n\t// the final encoding, the message body length cannot be determined\n\t// reliably; the server MUST respond with the 400 (Bad Request)\n\t// status code and then close the connection.\n\tif enc_header, ok := headers_get_unsafe(headers^, \"transfer-encoding\"); ok {\n\t\tstrings.has_suffix(enc_header, \"chunked\") or_return\n\t}\n\n\t// RFC 7230 3.3.3: If a message is received with both a Transfer-Encoding and a\n\t// Content-Length header field, the 
Transfer-Encoding overrides the\n\t// Content-Length.  Such a message might indicate an attempt to\n\t// perform request smuggling (Section 9.5) or response splitting\n\t// (Section 9.4) and ought to be handled as an error.\n\tif headers_has_unsafe(headers^, \"transfer-encoding\") && headers_has_unsafe(headers^, \"content-length\") {\n\t\theaders_delete_unsafe(headers, \"content-length\")\n\t}\n\n\treturn true\n}\n"
  },
  {
    "path": "response.odin",
    "content": "package http\n\nimport \"core:bytes\"\nimport \"core:io\"\nimport \"core:log\"\nimport \"core:mem/virtual\"\nimport \"core:nbio\"\nimport \"core:slice\"\nimport \"core:strconv\"\n\nResponse :: struct {\n\t// Add your headers and cookies here directly.\n\theaders:          Headers,\n\tcookies:          [dynamic]Cookie,\n\n\t// If the response has been sent.\n\tsent:             bool,\n\n\t// NOTE: use `http.response_status` if the response body might have been set already.\n\tstatus:           Status,\n\n\t// Only for internal usage.\n\t_conn:            ^Connection,\n\t// TODO/PERF: with some internal refactoring, we should be able to write directly to the\n\t// connection (maybe a small buffer in this struct).\n\t_buf:             bytes.Buffer,\n\t_heading_written: bool,\n}\n\nresponse_init :: proc(r: ^Response, allocator := context.allocator) {\n\tr.status             = .Not_Found\n\tr.cookies.allocator  = allocator\n\tr._buf.buf.allocator = allocator\n\n\theaders_init(&r.headers, allocator)\n}\n\n/*\nPrefer the procedure group `body_set`.\n*/\nbody_set_bytes :: proc(r: ^Response, byts: []byte, loc := #caller_location) {\n\tassert(bytes.buffer_length(&r._buf) == 0, \"the response body has already been written\", loc)\n\t_response_write_heading(r, len(byts))\n\tbytes.buffer_write(&r._buf, byts)\n}\n\n/*\nPrefer the procedure group `body_set`.\n*/\nbody_set_str :: proc(r: ^Response, str: string, loc := #caller_location) {\n\t// This is safe because we don't write to the bytes.\n\tbody_set_bytes(r, transmute([]byte)str, loc)\n}\n\n/*\nSets the response body. 
After calling this you can no longer add headers to the response.\nIf, after calling, you want to change the status code, use the `response_status` procedure.\n\nFor bodies where you do not know the size or want an `io.Writer`, use the `response_writer_init`\nprocedure to create a writer.\n*/\nbody_set :: proc{\n\tbody_set_str,\n\tbody_set_bytes,\n}\n\n/*\nSets the status code with the safety of being able to do this after writing (part of) the body.\n*/\nresponse_status :: proc(r: ^Response, status: Status) {\n\tif r.status == status { return }\n\n\tr.status = status\n\n\t// If we have already written the heading, we can address the bytes directly to overwrite,\n\t// this is because of the fact that every status code is of length 3, and because we omit\n\t// the \"optional\" reason phrase out of the response.\n\tif bytes.buffer_length(&r._buf) > 0 {\n\t\tOFFSET :: len(\"HTTP/1.1 \")\n\n\t\tstatus_int_str := status_string(r.status)\n\t\tif len(status_int_str) < 4 {\n\t\t\tstatus_int_str = \"500 \"\n\t\t} else {\n\t\t\tstatus_int_str = status_int_str[0:4]\n\t\t}\n\n\t\tcopy(r._buf.buf[OFFSET:OFFSET + 4], status_int_str)\n\t}\n}\n\nResponse_Writer :: struct {\n\tr:     ^Response,\n\t// The writer you can write to.\n\tw:     io.Writer,\n\t// A dynamic wrapper over the `buffer` given in `response_writer_init`, doesn't allocate.\n\tbuf:   [dynamic]byte,\n\t// If destroy or close has been called.\n\tended: bool,\n}\n\n/*\nInitialize a writer you can use to write responses. Use the `body_set` procedure group if you have\na string or byte slice.\n\nThe buffer can be used to avoid very small writes, like the ones when you use the json package\n(each write in the json package is only a few bytes). 
You are allowed to pass nil which will disable\nbuffering.\n\nNOTE: You need to call io.destroy to signal the end of the body, OR io.close to send the response.\n*/\nresponse_writer_init :: proc(rw: ^Response_Writer, r: ^Response, buffer: []byte) -> io.Writer {\n\theaders_set_unsafe(&r.headers, \"transfer-encoding\", \"chunked\")\n\t_response_write_heading(r, -1)\n\n\trw.buf = slice.into_dynamic(buffer)\n\trw.r   = r\n\n\trw.w = io.Stream{\n\t\tprocedure = proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {\n\t\t\tws :: bytes.buffer_write_string\n\t\t\twrite_chunk :: proc(b: ^bytes.Buffer, chunk: []byte) {\n\t\t\t\tplen := i64(len(chunk))\n\t\t\t\tif plen == 0 { return }\n\n\t\t\t\tlog.debugf(\"response_writer chunk of size: %i\", plen)\n\n\t\t\t\tbytes.buffer_grow(b, 16)\n\t\t\t\tsize_buf := _dynamic_unwritten(b.buf)\n\t\t\t\tsize := strconv.write_int(size_buf, plen, 16)\n\t\t\t\t_dynamic_add_len(&b.buf, len(size))\n\n\t\t\t\tws(b, \"\\r\\n\")\n\t\t\t\tbytes.buffer_write(b, chunk)\n\t\t\t\tws(b, \"\\r\\n\")\n\t\t\t}\n\n\t\t\trw := (^Response_Writer)(stream_data)\n\t\t\tb := &rw.r._buf\n\n\t\t\t#partial switch mode {\n\t\t\tcase .Flush:\n\t\t\t\tassert(!rw.ended)\n\n\t\t\t\twrite_chunk(b, rw.buf[:])\n\t\t\t\tclear(&rw.buf)\n\t\t\t\treturn 0, nil\n\n\t\t\tcase .Destroy:\n\t\t\t\tassert(!rw.ended)\n\n\t\t\t\t// Write what is left.\n\t\t\t\twrite_chunk(b, rw.buf[:])\n\n\t\t\t\t// Signals the end of the body.\n\t\t\t\tws(b, \"0\\r\\n\\r\\n\")\n\n\t\t\t\trw.ended = true\n\t\t\t\treturn 0, nil\n\n\t\t\tcase .Close:\n\t\t\t\t// Write what is left.\n\t\t\t\twrite_chunk(b, rw.buf[:])\n\n\t\t\t\tif !rw.ended {\n\t\t\t\t\t// Signals the end of the body.\n\t\t\t\t\tws(b, \"0\\r\\n\\r\\n\")\n\t\t\t\t\trw.ended = true\n\t\t\t\t}\n\n\t\t\t\t// Send the response.\n\t\t\t\trespond(rw.r)\n\t\t\t\treturn 0, nil\n\n\t\t\tcase .Write:\n\t\t\t\tassert(!rw.ended)\n\n\t\t\t\t// No space, first write rw.buf, then check 
again for space, if still no space,\n\t\t\t\t// fully write the given p.\n\t\t\t\tif len(rw.buf) + len(p) > cap(rw.buf) {\n\t\t\t\t\twrite_chunk(b, rw.buf[:])\n\t\t\t\t\tclear(&rw.buf)\n\n\t\t\t\t\tif len(p) > cap(rw.buf) {\n\t\t\t\t\t\twrite_chunk(b, p)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tappend(&rw.buf, ..p)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Space, append bytes to the buffer.\n\t\t\t\t\tappend(&rw.buf, ..p)\n\t\t\t\t}\n\n\t\t\t\treturn i64(len(p)), .None\n\n\t\t\tcase .Query:\n\t\t\t\treturn io.query_utility({.Write, .Flush, .Destroy, .Close})\n\t\t\t}\n\t\t\treturn 0, .Empty\n\t\t},\n\t\tdata = rw,\n\t}\n\treturn rw.w\n}\n\n/*\nWrites the response status and headers to the buffer.\n\nThis is automatically called before writing anything to the Response.body or before calling a procedure\nthat sends the response.\n\nYou can pass `content_length < 0` to omit the content-length header, note that this header is\nrequired on most responses, but there are things like transfer-encodings that could leave it out.\n*/\n_response_write_heading :: proc(r: ^Response, content_length: int) {\n\tif r._heading_written { return }\n\tr._heading_written = true\n\n\tws   :: bytes.buffer_write_string\n\tconn := r._conn\n\tb    := &r._buf\n\n\tMIN             :: len(\"HTTP/1.1 200 \\r\\ndate: \\r\\ncontent-length: 1000\\r\\n\") + DATE_LENGTH\n\tAVG_HEADER_SIZE :: 20\n\treserve_size    := MIN + content_length + (AVG_HEADER_SIZE * headers_count(r.headers))\n\tbytes.buffer_grow(&r._buf, reserve_size)\n\n\t// According to RFC 7230 3.1.2 the reason phrase is insignificant,\n\t// because not doing so (and the fact that a status code is always length 3), we can change\n\t// the status code when we are already writing a body by just addressing the 3 bytes directly.\n\tstatus_int_str := status_string(r.status)\n\tif len(status_int_str) < 4 {\n\t\tstatus_int_str = \"500 \"\n\t} else {\n\t\tstatus_int_str = status_int_str[0:4]\n\t}\n\n\tws(b, \"HTTP/1.1 \")\n\tws(b, 
status_int_str)\n\tws(b, \"\\r\\n\")\n\n\t// Per RFC 9110 6.6.1 a Date header must be added in 2xx, 3xx, 4xx responses.\n\tif r.status >= .OK && r.status <= .Internal_Server_Error && !headers_has_unsafe(r.headers, \"date\") {\n\t\tws(b, \"date: \")\n\t\tws(b, server_date(conn.server))\n\t\tws(b, \"\\r\\n\")\n\t}\n\n\tif (\n\t\tcontent_length > -1                              &&\n\t\t!headers_has_unsafe(r.headers, \"content-length\") &&\n\t\tresponse_needs_content_length(r, conn) \\\n\t) {\n\t\tif content_length == 0 {\n\t\t\tws(b, \"content-length: 0\\r\\n\")\n\t\t} else {\n\t\t\tws(b, \"content-length: \")\n\n\t\t\tassert(content_length < 1000000000000000000 && content_length > -1000000000000000000)\n\t\t\tbuf: [20]byte\n\t\t\tws(b, strconv.write_int(buf[:], i64(content_length), 10))\n\t\t\tws(b, \"\\r\\n\")\n\t\t}\n\t}\n\n\tbstream := bytes.buffer_to_stream(b)\n\n\tfor header, value in r.headers._kv {\n\t\tws(b, header) // already has newlines escaped.\n\t\tws(b, \": \")\n\t\twrite_escaped_newlines(bstream, value)\n\t\tws(b, \"\\r\\n\")\n\t}\n\n\tfor cookie in r.cookies {\n\t\tcookie_write(bstream, cookie)\n\t\tws(b, \"\\r\\n\")\n\t}\n\n\t// Empty line denotes end of headers and start of body.\n\tws(b, \"\\r\\n\")\n}\n\n// Sends the response over the connection.\n// Frees the allocator (should be a request scoped allocator).\n// Closes the connection or starts the handling of the next request.\n@(private)\nresponse_send :: proc(r: ^Response, conn: ^Connection, loc := #caller_location) {\n\tassert(!r.sent, \"response has already been sent\", loc)\n\tr.sent = true\n\n\tcheck_body :: proc(res: rawptr, body: Body, err: Body_Error) {\n\t\tres := cast(^Response)res\n\t\twill_close: bool\n\n\t\tif err != nil {\n\t\t\t// Any read error should close the connection.\n\t\t\tresponse_status(res, body_error_status(err))\n\t\t\theaders_set_close(&res.headers)\n\t\t\twill_close = true\n\t\t}\n\n\t\tresponse_send_got_body(res, will_close)\n\t}\n\n\t// RFC 7230 6.3: A server MUST 
read\n\t// the entire request message body or close the connection after sending\n\t// its response, since otherwise the remaining data on a persistent\n\t// connection would be misinterpreted as the next request.\n\tif !response_must_close(&conn.loop.req, r) {\n\n\t\t// Body has been drained during handling.\n\t\tif _, got_body := conn.loop.req._body_ok.?; got_body {\n\t\t\tresponse_send_got_body(r, false)\n\t\t} else {\n\t\t\tbody(&conn.loop.req, Max_Post_Handler_Discard_Bytes, r, check_body)\n\t\t}\n\n\t} else {\n\t\tresponse_send_got_body(r, true)\n\t}\n}\n\n@(private)\nresponse_send_got_body :: proc(r: ^Response, will_close: bool) {\n\tconn := r._conn\n\n\tif will_close {\n\t\tif !connection_set_state(r._conn, .Will_Close) { return }\n\t}\n\n\tif bytes.buffer_length(&r._buf) == 0 {\n\t\t_response_write_heading(r, 0)\n\t}\n\n\tbuf := bytes.buffer_to_bytes(&r._buf)\n\tnbio.send_poly(conn.socket, {buf}, conn, on_response_sent)\n}\n\n\n@(private)\non_response_sent :: proc(op: ^nbio.Operation, conn: ^Connection) {\n\tif op.send.err != nil {\n\t\tlog.errorf(\"could not send response: %v\", op.send.err)\n\t\tif !connection_set_state(conn, .Will_Close) { return }\n\t}\n\n\tclean_request_loop(conn)\n}\n\n// Response has been sent, clean up and close/handle next.\n@(private)\nclean_request_loop :: proc(conn: ^Connection, close: Maybe(bool) = nil) {\n\tcontext.temp_allocator = virtual.arena_allocator(&conn.temp_allocator)\n\n\t// blocks, size, used := allocator_free_all(&conn.temp_allocator)\n\t// log.debugf(\"temp_allocator had %d blocks of a total size of %m of which %m was used\", blocks, size, used)\n\tfree_all(context.temp_allocator)\n\n\tscanner_reset(&conn.scanner)\n\n\tclient := conn.loop.req.client\n\tconn.loop.req = {}\n\tconn.loop.req.client = client\n\n\tconn.loop.res = {}\n\n\tif c, ok := close.?; (ok && c) || conn.state == .Will_Close {\n\t\tconnection_close(conn)\n\t} else {\n\t\tif !connection_set_state(conn, .Idle) { return }\n\t\tconn_handle_req(conn, 
context.temp_allocator)\n\t}\n}\n\n// A server MUST NOT send a Content-Length header field in any response\n// with a status code of 1xx (Informational) or 204 (No Content).  A\n// server MUST NOT send a Content-Length header field in any 2xx\n// (Successful) response to a CONNECT request.\n@(private)\nresponse_needs_content_length :: proc(r: ^Response, conn: ^Connection) -> bool {\n\tif status_is_informational(r.status) || r.status == .No_Content {\n\t\treturn false\n\t}\n\n\tif status_is_success(r.status) {\n\t\tline, _ := conn.loop.req.line.?\n\t\tif line.method == .Connect {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n// Determines if the connection needs to be closed after sending the response.\n@(private)\nresponse_must_close :: proc(req: ^Request, res: ^Response) -> bool {\n\t// If the request we are responding to indicates it is closing the connection, close our side too.\n\tif req, req_has := headers_get_unsafe(req.headers, \"connection\"); req_has && req == \"close\" {\n\t\treturn true\n\t}\n\n\t// If we are responding with a close connection header, make sure we close.\n\tif res, res_has := headers_get_unsafe(res.headers, \"connection\"); res_has && res == \"close\" {\n\t\treturn true\n\t}\n\n\t// If the body was tried to be received, but failed, close.\n\tif body_ok, got_body := req._body_ok.?; got_body && !body_ok {\n\t\theaders_set_close(&res.headers)\n\t\treturn true\n\t}\n\n\t// If the connection's state indicates closing, close.\n\tif res._conn.state >= .Will_Close {\n\t\theaders_set_close(&res.headers)\n\t\treturn true\n\t}\n\n\t// HTTP 1.0 does not have persistent connections.\n\tline := req.line.?\n\tif line.version == {1, 0} {\n\t\treturn true\n\t}\n\n\treturn false\n}\n"
  },
  {
    "path": "responses.odin",
    "content": "package http\n\nimport \"core:bytes\"\nimport \"core:encoding/json\"\nimport \"core:io\"\nimport \"core:log\"\nimport \"core:nbio\"\nimport \"core:path/filepath\"\nimport \"core:strings\"\n\n// Sets the response to one that sends the given HTML.\nrespond_html :: proc(r: ^Response, html: string, status: Status = .OK, loc := #caller_location) {\n\tr.status = status\n\theaders_set_content_type(&r.headers, mime_to_content_type(Mime_Type.Html))\n\tbody_set(r, html, loc)\n\trespond(r, loc)\n}\n\n// Sets the response to one that sends the given plain text.\nrespond_plain :: proc(r: ^Response, text: string, status: Status = .OK, loc := #caller_location) {\n\tr.status = status\n\theaders_set_content_type(&r.headers, mime_to_content_type(Mime_Type.Plain))\n\tbody_set(r, text, loc)\n\trespond(r, loc)\n}\n\n/*\nSends the content of the file at the given path as the response.\n\nThis procedure uses non blocking IO and only allocates the size of the file in the body's buffer,\nno other allocations or temporary buffers, this is to make it as fast as possible.\n\nThe content type is taken from the path, optionally overwritten using the parameter.\n\nIf the file doesn't exist, a 404 response is sent.\nIf any other error occurs, a 500 is sent and the error is logged.\n*/\nrespond_file :: proc(r: ^Response, path: string, content_type: Maybe(Mime_Type) = nil, loc := #caller_location) {\n\t// PERF: we are still putting the content into the body buffer, we could stream it.\n\n\tassert_has_td(loc)\n\tassert(!r.sent, \"response has already been sent\", loc)\n\n\tmime := mime_from_extension(path)\n\tcontent_type := mime_to_content_type(mime)\n\theaders_set_content_type(&r.headers, content_type)\n\n\tnbio.open_poly(path, r, on_open)\n\n\ton_open :: proc(op: ^nbio.Operation, r: ^Response) {\n\t\t#partial switch op.open.err {\n\t\tcase .Not_Found:\n\t\t\tlog.debugf(\"respond_file, open %q, no such file or directory\", op.open.path)\n\t\t\trespond_with_status(r, 
.Not_Found)\n\t\tcase:\n\t\t\tlog.warnf(\"respond_file, open %q error: %i\", op.open.path, op.open.err)\n\t\t\trespond_with_status(r, .Not_Found)\n\t\tcase nil:\n\t\t\tnbio.stat_poly2(op.open.handle, op.open.path, r, on_stat)\n\t\t}\n\t}\n\n\ton_stat :: proc(op: ^nbio.Operation, path: string, r: ^Response) {\n\t\t#partial switch op.stat.err {\n\t\tcase:\n\t\t\tlog.errorf(\"respond_file, could not stat %q: %v\", path, op.stat.err)\n\t\t\tnbio.close(op.stat.handle)\n\t\t\trespond_with_status(r, .Not_Found)\n\t\tcase nil:\n\t\t\tassert(op.stat.size < i64(max(int)))\n\n\t\t\t_response_write_heading(r, int(op.stat.size))\n\n\t\t\tbytes.buffer_grow(&r._buf, int(op.stat.size))\n\t\t\tbuf := _dynamic_unwritten(r._buf.buf)[:op.stat.size]\n\n\t\t\tnbio.read_poly2(op.stat.handle, 0, buf, path, r, on_read, all=true)\n\t\t}\n\t}\n\n\ton_read :: proc(op: ^nbio.Operation, path: string, r: ^Response) {\n\t\tnbio.close(op.read.handle)\n\t\t#partial switch op.read.err {\n\t\tcase:\n\t\t\tlog.errorf(\"respond_file, could not read %q: %v\", path, op.read.err)\n\t\t\trespond_with_status(r, .Internal_Server_Error)\n\t\tcase nil:\n\t\t\t_dynamic_add_len(&r._buf.buf, op.read.read)\n\t\t\trespond_with_status(r, .OK)\n\t\t}\n\t}\n}\n\n/*\nResponds with the given content, determining content type from the given path.\n\nThis is very useful when you want to `#load(path)` at compile time and respond with that.\n*/\nrespond_file_content :: proc(r: ^Response, path: string, content: []byte, status: Status = .OK, loc := #caller_location) {\n\tmime := mime_from_extension(path)\n\tcontent_type := mime_to_content_type(mime)\n\n\tr.status = status\n\theaders_set_content_type(&r.headers, content_type)\n\tbody_set(r, content, loc)\n\trespond(r, loc)\n}\n\n/*\nSets the response to one that, based on the request path, returns a file.\nbase:    The base of the request path that should be removed when retrieving the file.\ntarget:  The path to the directory to serve.\nrequest: The request path.\n\nPath 
traversal is detected and cleaned up.\nThe Content-Type is set based on the file extension, see the MimeType enum for known file extensions.\n*/\nrespond_dir :: proc(r: ^Response, base, target, request: string, loc := #caller_location) {\n\tif !strings.has_prefix(request, base) {\n\t\trespond(r, Status.Not_Found)\n\t\treturn\n\t}\n\n\t// Detect path traversal attacks.\n\treq_clean, err_req   := filepath.clean(request, context.temp_allocator)\n\tbase_clean, err_base := filepath.clean(base, context.temp_allocator)\n\tif err_req != nil || err_base != nil || !strings.has_prefix(req_clean, base_clean) {\n\t\trespond(r, Status.Not_Found)\n\t\treturn\n\t}\n\n\tfile_path, _ := filepath.join([]string{\"./\", target, strings.trim_prefix(req_clean, base_clean)}, context.temp_allocator)\n\trespond_file(r, file_path, loc = loc)\n}\n\n// Sets the response to one that returns the JSON representation of the given value.\nrespond_json :: proc(r: ^Response, v: any, status: Status = .OK, opt: json.Marshal_Options = {}, loc := #caller_location) -> (err: json.Marshal_Error) {\n\topt := opt\n\n\tr.status = status\n\theaders_set_content_type(&r.headers, mime_to_content_type(Mime_Type.Json))\n\n\t// Going to write a MINIMUM of 128 bytes at a time.\n\trw:  Response_Writer\n\tbuf: [128]byte\n\tresponse_writer_init(&rw, r, buf[:])\n\n\t// Ends the body and sends the response.\n\tdefer io.close(rw.w)\n\n\tif err = json.marshal_to_writer(rw.w, v, &opt); err != nil {\n\t\theaders_set_close(&r.headers)\n\t\tresponse_status(r, .Internal_Server_Error)\n\t}\n\n\treturn\n}\n\n/*\nPrefer the procedure group `respond`.\n*/\nrespond_with_none :: proc(r: ^Response, loc := #caller_location) {\n\tassert_has_td(loc)\n\n\tconn := r._conn\n\treq  := conn.loop.req\n\n\t// Respond as head request if we set it to get.\n\tif rline, ok := req.line.(Requestline); ok && req.is_head && conn.server.opts.redirect_head_to_get {\n\t\trline.method = .Head\n\t}\n\n\tresponse_send(r, conn, loc)\n}\n\n/*\nPrefer the 
procedure group `respond`.\n*/\nrespond_with_status :: proc(r: ^Response, status: Status, loc := #caller_location) {\n\tresponse_status(r, status)\n\trespond(r, loc)\n}\n\n// Sends the response back to the client, handlers should call this.\nrespond :: proc {\n\trespond_with_none,\n\trespond_with_status,\n}\n"
  },
  {
    "path": "routing.odin",
    "content": "package http\n\nimport \"base:runtime\"\n\nimport \"core:log\"\nimport \"core:net\"\nimport \"core:strconv\"\nimport \"core:strings\"\nimport \"core:text/match\"\n\nURL :: struct {\n\traw:    string, // All other fields are views/slices into this string.\n\tscheme: string,\n\thost:   string,\n\tpath:   string,\n\tquery:  string,\n}\n\nurl_parse :: proc(raw: string) -> (url: URL) {\n\turl.raw = raw\n\ts := raw\n\n\t// Per RFC 3986 3.4 the query component can contain both ':' and '/' characters unescaped.\n\t// Since the scheme may be absent in a HTTP request line, the query should be separated first.\n\ti := strings.index(s, \"?\")\n\tif i != -1 {\n\t\turl.query = s[i+1:]\n\t\ts = s[:i]\n\t}\n\n\ti = strings.index(s, \"://\")\n\tif i >= 0 {\n\t\turl.scheme = s[:i]\n\t\ts = s[i+3:]\n\t}\n\n\ti = strings.index(s, \"/\")\n\tif i == -1 {\n\t\turl.host = s\n\t} else {\n\t\turl.host = s[:i]\n\t\turl.path = s[i:]\n\t}\n\n\treturn\n}\n\nQuery_Entry :: struct {\n\tkey, value: string,\n}\n\nquery_iter :: proc(query: ^string) -> (entry: Query_Entry, ok: bool) {\n\tif len(query) == 0 { return }\n\n\tok = true\n\n\tparam: string\n\ti := strings.index(query^, \"&\")\n\tif i < 0 {\n\t\tparam = query^\n\t\tquery^ = \"\"\n\t} else {\n\t\tparam = query[:i]\n\t\tquery^ = query[i+1:]\n\t}\n\n\ti = strings.index(param, \"=\")\n\tif i < 0 {\n\t\tentry.key = param\n\t\tentry.value = \"\"\n\t\treturn\n\t}\n\n\tentry.key = param[:i]\n\tentry.value = param[i+1:]\n\n\treturn\n}\n\nquery_get :: proc(url: URL, key: string) -> (val: string, ok: bool) #optional_ok {\n\tq := url.query\n\tfor entry in #force_inline query_iter(&q) {\n\t\tif entry.key == key {\n\t\t\treturn entry.value, true\n\t\t}\n\t}\n\treturn\n}\n\nquery_get_percent_decoded :: proc(url: URL, key: string, allocator := context.temp_allocator) -> (val: string, ok: bool) {\n\tstr := query_get(url, key) or_return\n\treturn net.percent_decode(str, allocator)\n}\n\nquery_get_bool :: proc(url: URL, key: string) -> 
(result, set: bool) #optional_ok {\n\tstr := query_get(url, key) or_return\n\tset = true\n\tswitch str {\n\tcase \"\", \"false\", \"0\", \"no\":\n\tcase:\n\t\tresult = true\n\t}\n\treturn\n}\n\nquery_get_int :: proc(url: URL, key: string, base := 0) -> (result: int, ok: bool, set: bool) {\n\tstr := query_get(url, key) or_return\n\tset = true\n\tresult, ok = strconv.parse_int(str, base)\n\treturn\n}\n\nquery_get_uint :: proc(url: URL, key: string, base := 0) -> (result: uint, ok: bool, set: bool) {\n\tstr := query_get(url, key) or_return\n\tset = true\n\tresult, ok = strconv.parse_uint(str, base)\n\treturn\n}\n\nRoute :: struct {\n\thandler: Handler,\n\tpattern: string,\n}\n\nRouter :: struct {\n\tallocator: runtime.Allocator,\n\troutes:    map[Method][dynamic]Route,\n\tall:       [dynamic]Route,\n}\n\nrouter_init :: proc(router: ^Router, allocator := context.allocator) {\n\trouter.allocator = allocator\n\trouter.routes = make(map[Method][dynamic]Route, len(Method), allocator)\n}\n\nrouter_destroy :: proc(router: ^Router) {\n\tcontext.allocator = router.allocator\n\n\tfor route in router.all {\n\t\tdelete(route.pattern)\n\t}\n\tdelete(router.all)\n\n\tfor _, routes in router.routes {\n\t\tfor route in routes {\n\t\t\tdelete(route.pattern)\n\t\t}\n\n\t\tdelete(routes)\n\t}\n\n\tdelete(router.routes)\n}\n\n// Returns a handler that matches against the given routes.\nrouter_handler :: proc(router: ^Router) -> Handler {\n\th: Handler\n\th.user_data = router\n\n\th.handle = proc(handler: ^Handler, req: ^Request, res: ^Response) {\n\t\trouter := (^Router)(handler.user_data)\n\t\trline := req.line.(Requestline)\n\n\t\tif routes_try(router.routes[rline.method], req, res) {\n\t\t\treturn\n\t\t}\n\n\t\tif routes_try(router.all, req, res) {\n\t\t\treturn\n\t\t}\n\n\t\tlog.infof(\"no route matched %s %s\", method_string(rline.method), rline.target)\n\t\tres.status = .Not_Found\n\t\trespond(res)\n\t}\n\n\treturn h\n}\n\nroute_get :: proc(router: ^Router, pattern: string, 
handler: Handler) {\n\troute_add(\n\t\trouter,\n\t\t.Get,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\nroute_post :: proc(router: ^Router, pattern: string, handler: Handler) {\n\troute_add(\n\t\trouter,\n\t\t.Post,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\n// NOTE: this does not get called when `Server_Opts.redirect_head_to_get` is set to true.\nroute_head :: proc(router: ^Router, pattern: string, handler: Handler) {\n\troute_add(\n\t\trouter,\n\t\t.Head,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\nroute_put :: proc(router: ^Router, pattern: string, handler: Handler) {\n\troute_add(\n\t\trouter,\n\t\t.Put,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\nroute_patch :: proc(router: ^Router, pattern: string, handler: Handler) {\n\troute_add(\n\t\trouter,\n\t\t.Patch,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\nroute_trace :: proc(router: ^Router, pattern: string, handler: Handler) {\n\troute_add(\n\t\trouter,\n\t\t.Trace,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\nroute_delete :: proc(router: ^Router, pattern: string, handler: Handler) {\n\troute_add(\n\t\trouter,\n\t\t.Delete,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\nroute_connect :: proc(router: ^Router, pattern: string, handler: Handler) {\n\troute_add(\n\t\trouter,\n\t\t.Connect,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\nroute_options :: proc(router: ^Router, pattern: string, 
handler: Handler) {\n\troute_add(\n\t\trouter,\n\t\t.Options,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\n// Adds a catch-all fallback route (all methods, ran if no other routes match).\nroute_all :: proc(router: ^Router, pattern: string, handler: Handler) {\n\tif router.all == nil {\n\t\trouter.all = make([dynamic]Route, 0, 1, router.allocator)\n\t}\n\n\tappend(\n\t\t&router.all,\n\t\tRoute{handler = handler, pattern = strings.concatenate([]string{\"^\", pattern, \"$\"}, router.allocator)},\n\t)\n}\n\n@(private)\nroute_add :: proc(router: ^Router, method: Method, route: Route) {\n\tif method not_in router.routes {\n\t\trouter.routes[method] = make([dynamic]Route, router.allocator)\n\t}\n\n\tappend(&router.routes[method], route)\n}\n\n@(private)\nroutes_try :: proc(routes: [dynamic]Route, req: ^Request, res: ^Response) -> bool {\n\ttry_captures: [match.MAX_CAPTURES]match.Match = ---\n\tfor route in routes {\n\t\tn, err := match.find_aux(req.url.path, route.pattern, 0, true, &try_captures)\n\t\tif err != .OK {\n\t\t\tlog.errorf(\"Error matching route: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif n > 0 {\n\t\t\tcaptures := make([]string, n - 1, context.temp_allocator)\n\t\t\tfor cap, i in try_captures[1:n] {\n\t\t\t\tcaptures[i] = req.url.path[cap.byte_start:cap.byte_end]\n\t\t\t}\n\n\t\t\treq.url_params = captures\n\t\t\trh := route.handler\n\t\t\trh.handle(&rh, req, res)\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n"
  },
  {
    "path": "scanner.odin",
    "content": "#+private\npackage http\n\nimport \"core:mem/virtual\"\nimport \"base:intrinsics\"\n\nimport \"core:bufio\"\nimport \"core:nbio\"\nimport \"core:net\"\n\nScan_Callback :: #type proc(user_data: rawptr, token: string, err: bufio.Scanner_Error)\nSplit_Proc    :: #type proc(split_data: rawptr, data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: bufio.Scanner_Error, final_token: bool)\n\nscan_lines :: proc(split_data: rawptr, data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: bufio.Scanner_Error, final_token: bool) {\n\treturn bufio.scan_lines(data, at_eof)\n}\n\nscan_num_bytes :: proc(split_data: rawptr, data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: bufio.Scanner_Error, final_token: bool) {\n\tassert(split_data != nil)\n\tn := int(uintptr(split_data))\n\tassert(n >= 0)\n\n\tif at_eof && len(data) < n {\n\t\treturn\n\t}\n\n\tif len(data) < n {\n\t\treturn\n\t}\n\n\treturn n, data[:n], nil, false\n}\n\n// A callback based scanner over the connection based on nbio.\nScanner :: struct /* #no_copy */ {\n\tconnection:                   ^Connection,\n\tsplit:                        Split_Proc,\n\tsplit_data:                   rawptr,\n\tbuf:                          [dynamic]byte,\n\tmax_token_size:               int,\n\tstart:                        int,\n\tend:                          int,\n\ttoken:                        []byte,\n\t_err:                         bufio.Scanner_Error,\n\tconsecutive_empty_reads:      int,\n\tmax_consecutive_empty_reads:  int,\n\tsuccessive_empty_token_count: int,\n\tdone:                         bool,\n\tcould_be_too_short:           bool,\n\tuser_data:                    rawptr,\n\tcallback:                     Scan_Callback,\n}\n\nINIT_BUF_SIZE :: 1024\nDEFAULT_MAX_CONSECUTIVE_EMPTY_READS :: 128\n\nscanner_init :: proc(s: ^Scanner, c: ^Connection, buf_allocator := context.allocator) {\n\ts.connection     = c\n\ts.split          = scan_lines\n\ts.max_token_size = 
bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE\n\ts.buf.allocator  = buf_allocator\n}\n\nscanner_destroy :: proc(s: ^Scanner) {\n\tdelete(s.buf)\n}\n\nscanner_reset :: proc(s: ^Scanner) {\n\tremove_range(&s.buf, 0, s.start)\n\ts.end   -= s.start\n\ts.start  = 0\n\n\ts.split                        = scan_lines\n\ts.split_data                   = nil\n\ts.max_token_size               = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE\n\ts.token                        = nil\n\ts._err                         = nil\n\ts.consecutive_empty_reads      = 0\n\ts.max_consecutive_empty_reads  = DEFAULT_MAX_CONSECUTIVE_EMPTY_READS\n\ts.successive_empty_token_count = 0\n\ts.done                         = false\n\ts.could_be_too_short           = false\n\ts.user_data                    = nil\n\ts.callback                     = nil\n}\n\nscanner_scan :: proc(\n\ts: ^Scanner,\n\tuser_data: rawptr,\n\tcallback: proc(user_data: rawptr, token: string, err: bufio.Scanner_Error),\n) {\n\tset_err :: proc(s: ^Scanner, err: bufio.Scanner_Error) {\n\t\tswitch s._err {\n\t\tcase nil, .EOF:\n\t\t\ts._err = err\n\t\t}\n\t}\n\n\tif s.done {\n\t\tcallback(user_data, \"\", .EOF)\n\t\treturn\n\t}\n\n\t// Check if a token is possible with what is available\n\t// Allow the split procedure to recover if it fails\n\tif s.start < s.end || s._err != nil {\n\t\tadvance, token, err, final_token := s.split(s.split_data, s.buf[s.start:s.end], s._err != nil)\n\t\tif final_token {\n\t\t\ts.token = token\n\t\t\ts.done = true\n\t\t\tcallback(user_data, \"\", .EOF)\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tset_err(s, err)\n\t\t\tcallback(user_data, \"\", s._err)\n\t\t\treturn\n\t\t}\n\n\t\t// Do advance\n\t\tif advance < 0 {\n\t\t\tset_err(s, .Negative_Advance)\n\t\t\tcallback(user_data, \"\", s._err)\n\t\t\treturn\n\t\t}\n\t\tif advance > s.end - s.start {\n\t\t\tset_err(s, .Advanced_Too_Far)\n\t\t\tcallback(user_data, \"\", s._err)\n\t\t\treturn\n\t\t}\n\t\ts.start += advance\n\n\t\ts.token = token\n\t\tif s.token != nil 
{\n\t\t\tif s._err == nil || advance > 0 {\n\t\t\t\ts.successive_empty_token_count = 0\n\t\t\t} else {\n\t\t\t\ts.successive_empty_token_count += 1\n\n\t\t\t\tif s.successive_empty_token_count > s.max_consecutive_empty_reads {\n\t\t\t\t\tset_err(s, .No_Progress)\n\t\t\t\t\tcallback(user_data, \"\", s._err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ts.consecutive_empty_reads = 0\n\t\t\ts.callback = nil\n\t\t\ts.user_data = nil\n\t\t\tcallback(user_data, string(token), s._err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// If an error is hit, no token can be created\n\tif s._err != nil {\n\t\ts.start = 0\n\t\ts.end = 0\n\t\tcallback(user_data, \"\", s._err)\n\t\treturn\n\t}\n\n\tcould_be_too_short := false\n\n\t// Resize the buffer if full\n\tif s.end == len(s.buf) {\n\t\tif s.max_token_size <= 0 {\n\t\t\ts.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE\n\t\t}\n\n\t\tif s.end - s.start >= s.max_token_size {\n\t\t\tset_err(s, .Too_Long)\n\t\t\tcallback(user_data, \"\", s._err)\n\t\t\treturn\n\t\t}\n\n\t\t// TODO: write over the part of the buffer already used\n\n\t\t// overflow check\n\t\tnew_size := INIT_BUF_SIZE\n\t\tif len(s.buf) > 0 {\n\t\t\toverflowed: bool\n\t\t\tif new_size, overflowed = intrinsics.overflow_mul(len(s.buf), 2); overflowed {\n\t\t\t\tset_err(s, .Too_Long)\n\t\t\t\tcallback(user_data, \"\", s._err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\told_size := len(s.buf)\n\t\tresize(&s.buf, new_size)\n\n\t\tcould_be_too_short = old_size >= len(s.buf)\n\n\t}\n\n\t// Read data into the buffer\n\ts.consecutive_empty_reads += 1\n\ts.user_data = user_data\n\ts.callback = callback\n\ts.could_be_too_short = could_be_too_short\n\n\tassert_has_td()\n\t// TODO: some kinda timeout on this.\n\tnbio.recv_poly(s.connection.socket, {s.buf[s.end:len(s.buf)]}, s, scanner_on_read)\n}\n\nscanner_on_read :: proc(op: ^nbio.Operation, s: ^Scanner) {\n\tcontext.temp_allocator = virtual.arena_allocator(&s.connection.temp_allocator)\n\n\tdefer scanner_scan(s, s.user_data, 
s.callback)\n\n\tif op.recv.err != nil {\n\t\t#partial switch op.recv.err.(net.TCP_Recv_Error) {\n\t\tcase .Connection_Closed, .Invalid_Argument:\n\t\t\t// EBADF (bad file descriptor) happens when OS closes socket.\n\t\t\ts._err = .EOF\n\t\t\treturn\n\t\t}\n\n\t\ts._err = .Unknown\n\t\treturn\n\t}\n\n\t// When n == 0, connection is closed or buffer is of length 0.\n\tif op.recv.received == 0 {\n\t\ts._err = .EOF\n\t\treturn\n\t}\n\n\tif op.recv.received < 0 || len(s.buf) - s.end < op.recv.received {\n\t\ts._err = .Bad_Read_Count\n\t\treturn\n\t}\n\n\ts.end += op.recv.received\n\tif op.recv.received > 0 {\n\t\ts.successive_empty_token_count = 0\n\t\treturn\n\t}\n}\n"
  },
  {
    "path": "server.odin",
    "content": "package http\n\nimport \"base:runtime\"\n\nimport \"core:bufio\"\nimport \"core:bytes\"\nimport \"core:c/libc\"\nimport \"core:fmt\"\nimport \"core:log\"\nimport \"core:mem\"\nimport \"core:mem/virtual\"\nimport \"core:nbio\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:slice\"\nimport \"core:sync\"\nimport \"core:thread\"\nimport \"core:time\"\n\nServer_Opts :: struct {\n\t// Whether the server should accept every request that sends a \"Expect: 100-continue\" header automatically.\n\t// Defaults to true.\n\tauto_expect_continue:    bool,\n\t// When this is true, any HEAD request is automatically redirected to the handler as a GET request.\n\t// Then, when the response is sent, the body is removed from the response.\n\t// Defaults to true.\n\tredirect_head_to_get:    bool,\n\t// Limit the maximum number of bytes to read for the request line (first line of request containing the URI).\n\t// The HTTP spec does not specify any limits but in practice it is safer.\n\t// RFC 7230 3.1.1 says:\n\t// Various ad hoc limitations on request-line length are found in\n\t// practice.  
It is RECOMMENDED that all HTTP senders and recipients\n\t// support, at a minimum, request-line lengths of 8000 octets.\n\t// defaults to 8000.\n\tlimit_request_line:      int,\n\t// Limit the length of the headers.\n\t// The HTTP spec does not specify any limits but in practice it is safer.\n\t// defaults to 8000.\n\tlimit_headers:           int,\n\t// The thread count to use, defaults to your core count.\n\tthread_count:            int,\n\n\t// // The initial size of the temp_allocator for each connection, defaults to 256KiB and doubles\n\t// // each time it needs to grow.\n\t// // NOTE: this value is assigned globally, running multiple servers with a different value will\n\t// // not work.\n\t// initial_temp_block_cap:  uint,\n\t// // The amount of free blocks each thread is allowed to hold on to before deallocating excess.\n\t// // Defaults to 64.\n\t// max_free_blocks_queued:  uint,\n}\n\nDefault_Server_Opts := Server_Opts {\n\tauto_expect_continue    = true,\n\tredirect_head_to_get    = true,\n\tlimit_request_line      = 8000,\n\tlimit_headers           = 8000,\n\t// initial_temp_block_cap  = 256 * mem.Kilobyte,\n\t// max_free_blocks_queued  = 64,\n}\n\nServer_State :: enum {\n\tUninitialized,\n\tIdle,\n\tListening,\n\tServing,\n\tRunning,\n\tClosing,\n\tCleaning,\n\tClosed,\n}\n\nServer :: struct {\n\topts:           Server_Opts,\n\ttcp_sock:       net.TCP_Socket,\n\tconn_allocator: mem.Allocator,\n\thandler:        Handler,\n\n\tthreads:        []Server_Thread,\n\t// Once the server starts closing/shutdown this is set to true, all threads will check it\n\t// and start their thread local shutdown procedure.\n\t//\n\t// NOTE: This is only ever set from false to true, and checked repeatedly,\n\t// so it doesn't have to be atomic, this is purely to keep the thread sanitizer happy.\n\tclosing:        Atomic(bool),\n\t// Threads will decrement the wait group when they have fully closed/shutdown.\n\t// The main thread waits on this to clean up global data and 
return.\n\tthreads_closed: sync.Wait_Group,\n\n\t// Updated every second with an updated date, this speeds up the server considerably\n\t// because it would otherwise need to call time.now() and format the date on each response.\n\tdate:           Server_Date,\n}\n\nServer_Thread :: struct {\n\tthread:     ^thread.Thread,\n\tevent_loop: ^nbio.Event_Loop,\n\tconns:      map[net.TCP_Socket]^Connection,\n\tstate:      Server_State,\n\taccept:     ^nbio.Operation,\n\n\t// free_temp_blocks:       map[int]queue.Queue(^Block),\n\t// free_temp_blocks_count: int,\n}\n\n@(private, disabled = ODIN_DISABLE_ASSERT)\nassert_has_td :: #force_inline proc(loc := #caller_location) {\n\tassert(td.state != .Uninitialized, \"The thread you are calling from is not a server/handler thread\", loc)\n}\n\n@(thread_local)\ntd: ^Server_Thread\n\nDefault_Endpoint := net.Endpoint {\n\taddress = net.IP4_Any,\n\tport    = 8080,\n}\n\nlisten :: proc(\n\ts: ^Server,\n\tendpoint: net.Endpoint = Default_Endpoint,\n\topts: Server_Opts = Default_Server_Opts,\n) -> (err: net.Network_Error) {\n\ts.opts = opts\n\ts.conn_allocator = context.allocator\n\t// initial_block_cap = int(s.opts.initial_temp_block_cap)\n\t// max_free_blocks_queued = int(s.opts.max_free_blocks_queued)\n\n\tacquire_err := nbio.acquire_thread_event_loop()\n\t// TODO: error handling.\n\tassert(acquire_err == nil)\n\n\ts.tcp_sock, err = nbio.listen_tcp(endpoint)\n\tif err != nil {\n\t\tnbio.run()\n\t\tnbio.release_thread_event_loop()\n\t\tserver_shutdown(s)\n\t}\n\treturn\n}\n\nserve :: proc(s: ^Server, h: Handler) -> (err: net.Network_Error) {\n\tif atomic_load(&s.closing) { return }\n\ts.handler = h\n\n\tif s.opts.thread_count == 0 {\n\t\ts.opts.thread_count = os.get_processor_core_count()\n\t}\n\n\tthread_count := max(1, s.opts.thread_count)\n\tsync.wait_group_add(&s.threads_closed, thread_count)\n\ts.threads = make([]Server_Thread, thread_count, s.conn_allocator)\n\tfor &td in s.threads[1:] {\n\t\ttd.thread = 
thread.create_and_start_with_poly_data2(s, &td, _server_thread_init, context)\n\t}\n\n\t// Start keeping track of and caching the date for the required date header.\n\tserver_date_start(s)\n\n\t_server_thread_init(s, &s.threads[0])\n\n\tsync.wait(&s.threads_closed)\n\n\tlog.debug(\"server threads are done, shutting down\")\n\n\tnet.shutdown(s.tcp_sock, .Both)\n\tnet.close(s.tcp_sock)\n\tfor t in s.threads[1:] { thread.destroy(t.thread) }\n\tdelete(s.threads)\n\n\treturn nil\n}\n\nlisten_and_serve :: proc(\n\ts: ^Server,\n\th: Handler,\n\tendpoint: net.Endpoint = Default_Endpoint,\n\topts: Server_Opts = Default_Server_Opts,\n) -> (err: net.Network_Error) {\n\tlisten(s, endpoint, opts) or_return\n\treturn serve(s, h)\n}\n\n_server_thread_init :: proc(s: ^Server, ttd: ^Server_Thread) {\n\ttd = ttd\n\n\ttd.conns = make(map[net.TCP_Socket]^Connection)\n\t// td.free_temp_blocks = make(map[int]queue.Queue(^Block))\n\n\tif td != &s.threads[0] {\n\t\terr := nbio.acquire_thread_event_loop()\n\t\t// TODO: error handling.\n\t\tassert(err == nil)\n\t}\n\n\ttd.event_loop = nbio.current_thread_event_loop()\n\n\tlog.debug(\"accepting connections\")\n\n\ttd.accept = nbio.accept_poly(s.tcp_sock, s, on_accept)\n\n\tlog.debug(\"starting event loop\")\n\ttd.state = .Serving\n\tfor {\n\t\tif atomic_load(&s.closing) { _server_thread_shutdown(s) }\n\t\tif td.state == .Closed { break }\n\t\tif td.state == .Cleaning { continue }\n\n\t\terr := nbio.tick()\n\t\tif err != nil {\n\t\t\tlog.errorf(\"non-blocking io tick error: %v\", err)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.debug(\"event loop end\")\n\n\tif td != &s.threads[0] {\n\t\truntime.default_temp_allocator_destroy(auto_cast context.temp_allocator.data)\n\t}\n\tsync.wait_group_done(&s.threads_closed)\n}\n\n\n// The time between checks and closes of connections in a graceful shutdown.\n@(private)\nSHUTDOWN_INTERVAL :: time.Millisecond * 100\n\n// Starts a graceful shutdown.\n//\n// Some error logs will be generated but all active connections 
are finished\n// before closing them and all connections and threads are freed.\n//\n// 1. Stops 'server_start' from accepting new connections.\n// 2. Close and free non-active connections.\n// 3. Repeat 2 every SHUTDOWN_INTERVAL until no more connections are open.\n// 4. Close the main socket.\n// 5. Signal 'server_start' it can return.\nserver_shutdown :: proc(s: ^Server) {\n\tatomic_store(&s.closing, true)\n\tfor t in s.threads {\n\t\tnbio.wake_up(t.event_loop)\n\t}\n}\n\n_server_thread_shutdown :: proc(s: ^Server, loc := #caller_location) {\n\tassert_has_td(loc)\n\n\ttd.state = .Closing\n\tdefer delete(td.conns)\n\t// defer {\n\t// \tblocks: int\n\t// \tfor _, &bucket in td.free_temp_blocks {\n\t// \t\tfor block in queue.pop_front_safe(&bucket) {\n\t// \t\t\tblocks += 1\n\t// \t\t\tfree(block)\n\t// \t\t}\n\t// \t\tqueue.destroy(&bucket)\n\t// \t}\n\t// \tdelete(td.free_temp_blocks)\n\t// \tlog.infof(\"had %i temp blocks to spare\", blocks)\n\t// }\n\n\tfor {\n\t\tfor sock, conn in td.conns {\n\t\t\t#partial switch conn.state {\n\t\t\tcase .Active:\n\t\t\t\tlog.infof(\"shutdown: connection %i still active\", sock)\n\t\t\tcase .New, .Idle, .Pending:\n\t\t\t\tlog.infof(\"shutdown: closing connection %i\", sock)\n\t\t\t\tconnection_close(conn)\n\t\t\tcase .Closing:\n\t\t\t\tlog.debugf(\"shutdown: connection %i is closing\", sock)\n\t\t\tcase .Closed:\n\t\t\t\tlog.warn(\"closed connection in connections map, maybe a race or logic error\")\n\t\t\t}\n\t\t}\n\n\t\tif len(td.conns) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\terr := nbio.tick()\n\t\tfmt.assertf(err == nil, \"IO tick error during shutdown: %v\", err)\n\t}\n\n\ttd.state = .Cleaning\n\n\tnbio.remove(td.accept)\n\ttd.accept = nil\n\n\tnbio.run()\n\tnbio.release_thread_event_loop()\n\n\ttd.state = .Closed\n\n\tlog.info(\"shutdown: done\")\n}\n\n@(private)\non_interrupt_server: ^Server\n@(private)\non_interrupt_context: runtime.Context\n\n// Registers a signal handler to shutdown the server gracefully on interrupt 
signal.\n// Can only be called once in the lifetime of the program because of a hacky interaction with libc.\nserver_shutdown_on_interrupt :: proc(s: ^Server) {\n\ton_interrupt_server = s\n\ton_interrupt_context = context\n\n\tlibc.signal(\n\t\tlibc.SIGINT,\n\t\tproc \"cdecl\" (_: i32) {\n\t\t\tcontext = on_interrupt_context\n\n\t\t\t// Force close on second signal.\n\t\t\tif td.state == .Closing {\n\t\t\t\tos.exit(1)\n\t\t\t}\n\n\t\t\tserver_shutdown(on_interrupt_server)\n\t\t},\n\t)\n}\n\n// Taken from Go's implementation,\n// The maximum amount of bytes we will read (if handler did not)\n// in order to get the connection ready for the next request.\n@(private)\nMax_Post_Handler_Discard_Bytes :: 256 << 10\n\n// How long to wait before actually closing a connection.\n// This is to make sure the client can fully receive the response.\n@(private)\nConn_Close_Delay :: time.Millisecond * 500\n\nConnection_State :: enum {\n\tPending, // Pending a client to attach.\n\tNew, // Got client, waiting to service first request.\n\tActive, // Servicing request.\n\tIdle, // Waiting for next request.\n\tWill_Close, // Closing after the current response is sent.\n\tClosing, // Going to close, cleaning up.\n\tClosed, // Fully closed.\n}\n\n@(private)\nconnection_set_state :: proc(c: ^Connection, s: Connection_State) -> bool {\n\tif s < .Closing && c.state >= .Closing {\n\t\treturn false\n\t}\n\n\tif s == .Closing && c.state == .Closed {\n\t\treturn false\n\t}\n\n\tc.state = s\n\treturn true\n}\n\n// TODO/PERF: pool the connections, saves having to allocate scanner buf and temp_allocator every time.\nConnection :: struct {\n\tserver:         ^Server,\n\tsocket:         net.TCP_Socket,\n\tstate:          Connection_State,\n\tscanner:        Scanner,\n\ttemp_allocator: virtual.Arena,\n\tloop:           Loop,\n}\n\n// Loop/request cycle state.\n@(private)\nLoop :: struct {\n\tconn: ^Connection,\n\treq:  Request,\n\tres:  Response,\n}\n\n@(private)\nconnection_close :: proc(c: 
^Connection, loc := #caller_location) {\n\tassert_has_td(loc)\n\n\tif c.state >= .Closing {\n\t\tlog.infof(\"connection %i already closing/closed\", c.socket)\n\t\treturn\n\t}\n\n\tlog.debugf(\"closing connection: %i\", c.socket)\n\n\tc.state = .Closing\n\n\t// RFC 7230 6.6.\n\n\t// Close read side of the connection, then wait a little bit, allowing the client\n\t// to process the closing and receive any remaining data.\n\tnet.shutdown(c.socket, net.Shutdown_Manner.Send)\n\n\tnbio.timeout_poly(Conn_Close_Delay, c, proc(_: ^nbio.Operation, c: ^Connection) {\n\t\tnbio.close_poly(c.socket, c, proc(_: ^nbio.Operation, c: ^Connection) {\n\t\t\tlog.debugf(\"closed connection: %i\", c.socket)\n\n\t\t\tc.state = .Closed\n\n\t\t\t// allocator_destroy(&c.temp_allocator)\n\t\t\tvirtual.arena_destroy(&c.temp_allocator)\n\n\t\t\tscanner_destroy(&c.scanner)\n\t\t\tdelete_key(&td.conns, c.socket)\n\t\t\tfree(c, c.server.conn_allocator)\n\t\t})\n\t})\n}\n\n@(private)\non_accept :: proc(op: ^nbio.Operation, server: ^Server) {\n\ttd.accept = nil\n\n\tif op.accept.err != nil {\n\t\t#partial switch op.accept.err {\n\t\tcase .Insufficient_Resources:\n\t\t\tlog.error(\"Connection limit reached, trying again in a bit\")\n\t\t\tnbio.timeout_poly(time.Second, server, proc(_: ^nbio.Operation, server: ^Server) {\n\t\t\t\ttd.accept = nbio.accept_poly(server.tcp_sock, server, on_accept)\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tfmt.panicf(\"accept error: %v\", op.accept.err)\n\t}\n\n\t// Accept next connection.\n\ttd.accept = nbio.accept_poly(server.tcp_sock, server, on_accept)\n\n\tc := new(Connection, server.conn_allocator)\n\tc.state = .New\n\tc.server = server\n\tc.socket = op.accept.client\n\tc.loop.req.client = op.accept.client_endpoint\n\n\ttd.conns[c.socket] = c\n\n\tlog.debugf(\"new connection with thread, got %d conns\", len(td.conns))\n\tconn_handle_reqs(c)\n}\n\n@(private)\nconn_handle_reqs :: proc(c: ^Connection) {\n\t// TODO/PERF: not sure why this is allocated on the connections 
allocator, can't it use the arena?\n\tscanner_init(&c.scanner, c, c.server.conn_allocator)\n\n\t// allocator_init(&c.temp_allocator, c.server.conn_allocator)\n\t// context.temp_allocator = allocator(&c.temp_allocator)\n\terr := virtual.arena_init_growing(&c.temp_allocator)\n\tassert(err == nil)\n\tcontext.temp_allocator = virtual.arena_allocator(&c.temp_allocator)\n\n\tconn_handle_req(c, context.temp_allocator)\n}\n\n@(private)\nconn_handle_req :: proc(c: ^Connection, allocator := context.temp_allocator) {\n\ton_rline1 :: proc(loop: rawptr, token: string, err: bufio.Scanner_Error) {\n\t\tl := cast(^Loop)loop\n\n\t\tif !connection_set_state(l.conn, .Active) { return }\n\n\t\tif err != nil {\n\t\t\tif err == .EOF {\n\t\t\t\tlog.debugf(\"client disconnected (EOF)\")\n\t\t\t} else {\n\t\t\t\tlog.warnf(\"request scanner error: %v\", err)\n\t\t\t}\n\n\t\t\tclean_request_loop(l.conn, close = true)\n\t\t\treturn\n\t\t}\n\n\t\t// In the interest of robustness, a server that is expecting to receive\n\t\t// and parse a request-line SHOULD ignore at least one empty line (CRLF)\n\t\t// received prior to the request-line.\n\t\tif len(token) == 0 {\n\t\t\tlog.debug(\"first request line empty, skipping in interest of robustness\")\n\t\t\tscanner_scan(&l.conn.scanner, loop, on_rline2)\n\t\t\treturn\n\t\t}\n\n\t\ton_rline2(loop, token, err)\n\t}\n\n\ton_rline2 :: proc(loop: rawptr, token: string, err: bufio.Scanner_Error) {\n\t\tl := cast(^Loop)loop\n\n\t\tif err != nil {\n\t\t\tlog.warnf(\"request scanning error: %v\", err)\n\t\t\tclean_request_loop(l.conn, close = true)\n\t\t\treturn\n\t\t}\n\n\t\trline, err := requestline_parse(token, context.temp_allocator)\n\t\tswitch err {\n\t\tcase .Method_Not_Implemented:\n\t\t\tlog.infof(\"request-line %q invalid method\", token)\n\t\t\theaders_set_close(&l.res.headers)\n\t\t\tl.res.status = .Not_Implemented\n\t\t\trespond(&l.res)\n\t\t\treturn\n\t\tcase .Invalid_Version_Format, .Not_Enough_Fields:\n\t\t\tlog.warnf(\"request-line %q 
invalid: %s\", token, err)\n\t\t\tclean_request_loop(l.conn, close = true)\n\t\t\treturn\n\t\tcase .None:\n\t\t\tl.req.line = rline\n\t\t}\n\n\t\t// Might need to support more versions later.\n\t\tif rline.version.major != 1 || rline.version.minor > 1 {\n\t\t\tlog.infof(\"request http version not supported %v\", rline.version)\n\t\t\theaders_set_close(&l.res.headers)\n\t\t\tl.res.status = .HTTP_Version_Not_Supported\n\t\t\trespond(&l.res)\n\t\t\treturn\n\t\t}\n\n\t\tl.req.url = url_parse(rline.target.(string))\n\n\t\tl.conn.scanner.max_token_size = l.conn.server.opts.limit_headers\n\t\tscanner_scan(&l.conn.scanner, loop, on_header_line)\n\t}\n\n\ton_header_line :: proc(loop: rawptr, token: string, err: bufio.Scanner_Error) {\n\t\tl := cast(^Loop)loop\n\n\t\tif err != nil {\n\t\t\tlog.warnf(\"request scanning error: %v\", err)\n\t\t\tclean_request_loop(l.conn, close = true)\n\t\t\treturn\n\t\t}\n\n\t\t// The first empty line denotes the end of the headers section.\n\t\tif len(token) == 0 {\n\t\t\ton_headers_end(l)\n\t\t\treturn\n\t\t}\n\n\t\tif _, ok := header_parse(&l.req.headers, token); !ok {\n\t\t\tlog.warnf(\"header-line %s is invalid\", token)\n\t\t\theaders_set_close(&l.res.headers)\n\t\t\tl.res.status = .Bad_Request\n\t\t\trespond(&l.res)\n\t\t\treturn\n\t\t}\n\n\t\tl.conn.scanner.max_token_size -= len(token)\n\t\tif l.conn.scanner.max_token_size <= 0 {\n\t\t\tlog.warn(\"request headers too large\")\n\t\t\theaders_set_close(&l.res.headers)\n\t\t\tl.res.status = .Request_Header_Fields_Too_Large\n\t\t\trespond(&l.res)\n\t\t\treturn\n\t\t}\n\n\t\tscanner_scan(&l.conn.scanner, loop, on_header_line)\n\t}\n\n\ton_headers_end :: proc(l: ^Loop) {\n\t\tif !headers_validate_for_server(&l.req.headers) {\n\t\t\tlog.warn(\"request headers are invalid\")\n\t\t\theaders_set_close(&l.res.headers)\n\t\t\tl.res.status = .Bad_Request\n\t\t\trespond(&l.res)\n\t\t\treturn\n\t\t}\n\n\t\tl.req.headers.readonly = true\n\n\t\tl.conn.scanner.max_token_size = 
bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE\n\n\t\t// Automatically respond with a continue status when the client has the Expect: 100-continue header.\n\t\tif expect, ok := headers_get_unsafe(l.req.headers, \"expect\");\n\t\t   ok && expect == \"100-continue\" && l.conn.server.opts.auto_expect_continue {\n\n\t\t\tl.res.status = .Continue\n\n\t\t\trespond(&l.res)\n\t\t\treturn\n\t\t}\n\n\t\trline := &l.req.line.(Requestline)\n\t\t// An options request with the \"*\" is a no-op/ping request to\n\t\t// check for server capabilities and should not be sent to handlers.\n\t\tif rline.method == .Options && rline.target.(string) == \"*\" {\n\t\t\tl.res.status = .OK\n\t\t\trespond(&l.res)\n\t\t} else {\n\t\t\t// Give the handler this request as a GET, since the HTTP spec\n\t\t\t// says a HEAD is identical to a GET but just without writing the body,\n\t\t\t// handlers shouldn't have to worry about it.\n\t\t\tis_head := rline.method == .Head\n\t\t\tif is_head && l.conn.server.opts.redirect_head_to_get {\n\t\t\t\tl.req.is_head = true\n\t\t\t\trline.method = .Get\n\t\t\t}\n\n\t\t\tl.conn.server.handler.handle(&l.conn.server.handler, &l.req, &l.res)\n\t\t}\n\t}\n\n\tc.loop.conn = c\n\tc.loop.res._conn = c\n\tc.loop.req._scanner = &c.scanner\n\trequest_init(&c.loop.req, allocator)\n\tresponse_init(&c.loop.res, allocator)\n\n\tc.scanner.max_token_size = c.server.opts.limit_request_line\n\tscanner_scan(&c.scanner, &c.loop, on_rline1)\n}\n\n// A buffer that will contain the date header for the current second.\n@(private)\nServer_Date :: struct {\n\tbuf_backing: [DATE_LENGTH]byte,\n\tbuf:         bytes.Buffer,\n}\n\n@(private)\nserver_date_start :: proc(s: ^Server) {\n\ts.date.buf.buf = slice.into_dynamic(s.date.buf_backing[:])\n\tserver_date_update(nil, s)\n}\n\n// Updates the time and schedules itself for after a second.\n@(private)\nserver_date_update :: proc(_: ^nbio.Operation, s: ^Server) {\n\tif atomic_load(&s.closing) { return }\n\n\tnbio.timeout_poly(time.Second, s, 
server_date_update)\n\n\tbytes.buffer_reset(&s.date.buf)\n\tdate_write(bytes.buffer_to_stream(&s.date.buf), time.now())\n}\n\n@(private)\nserver_date :: proc(s: ^Server) -> string {\n\treturn string(s.date.buf_backing[:])\n}\n"
  },
  {
    "path": "status.odin",
    "content": "package http\n\nimport \"base:runtime\"\n\nimport \"core:fmt\"\nimport \"core:strings\"\n\nStatus :: enum {\n\tContinue                        = 100,\n\tSwitching_Protocols             = 101,\n\tProcessing                      = 102,\n\tEarly_Hints                     = 103,\n\n\tOK                              = 200,\n\tCreated                         = 201,\n\tAccepted                        = 202,\n\tNon_Authoritative_Information   = 203,\n\tNo_Content                      = 204,\n\tReset_Content                   = 205,\n\tPartial_Content                 = 206,\n\tMulti_Status                    = 207,\n\tAlready_Reported                = 208,\n\tIM_Used                         = 226,\n\n\tMultiple_Choices                = 300,\n\tMoved_Permanently               = 301,\n\tFound                           = 302,\n\tSee_Other                       = 303,\n\tNot_Modified                    = 304,\n\tUse_Proxy                       = 305, // Deprecated.\n\tUnused                          = 306, // Deprecated.\n\tTemporary_Redirect              = 307,\n\tPermanent_Redirect              = 308,\n\n\tBad_Request                     = 400,\n\tUnauthorized                    = 401,\n\tPayment_Required                = 402,\n\tForbidden                       = 403,\n\tNot_Found                       = 404,\n\tMethod_Not_Allowed              = 405,\n\tNot_Acceptable                  = 406,\n\tProxy_Authentication_Required   = 407,\n\tRequest_Timeout                 = 408,\n\tConflict                        = 409,\n\tGone                            = 410,\n\tLength_Required                 = 411,\n\tPrecondition_Failed             = 412,\n\tPayload_Too_Large               = 413,\n\tURI_Too_Long                    = 414,\n\tUnsupported_Media_Type          = 415,\n\tRange_Not_Satisfiable           = 416,\n\tExpectation_Failed              = 417,\n\tIm_A_Teapot                     = 418,\n\tMisdirected_Request             = 421,\n\tUnprocessable_Content           = 422,\n\tLocked                          = 423,\n\tFailed_Dependency               = 424,\n\tToo_Early                       = 425,\n\tUpgrade_Required                = 426,\n\tPrecondition_Required           = 428,\n\tToo_Many_Requests               = 429,\n\tRequest_Header_Fields_Too_Large = 431,\n\tUnavailable_For_Legal_Reasons   = 451,\n\n\tInternal_Server_Error           = 500,\n\tNot_Implemented                 = 501,\n\tBad_Gateway                     = 502,\n\tService_Unavailable             = 503,\n\tGateway_Timeout                 = 504,\n\tHTTP_Version_Not_Supported      = 505,\n\tVariant_Also_Negotiates         = 506,\n\tInsufficient_Storage            = 507,\n\tLoop_Detected                   = 508,\n\tNot_Extended                    = 510,\n\tNetwork_Authentication_Required = 511,\n}\n\n_status_strings: [max(Status) + Status(1)]string\n\n// Populates the status_strings like a map from status to their string representation.\n// Where an empty string means an invalid code.\n@(init, private)\nstatus_strings_init :: proc \"contextless\" () {\n\tcontext = runtime.default_context()\n\tfor field in Status {\n\t\tname, ok := fmt.enum_value_to_string(field)\n\t\tassert(ok)\n\n\t\tb: strings.Builder\n\t\tstrings.write_int(&b, int(field))\n\t\tstrings.write_byte(&b, ' ')\n\n\t\t// Some edge cases aside, replaces underscores in the enum name with spaces.\n\t\t#partial switch field {\n\t\tcase .Non_Authoritative_Information: strings.write_string(&b, \"Non-Authoritative Information\")\n\t\tcase .Multi_Status:                  strings.write_string(&b, \"Multi-Status\")\n\t\tcase .Im_A_Teapot:                   strings.write_string(&b, \"I'm a teapot\")\n\t\tcase:\n\t\t\tfor c in name {\n\t\t\t\tswitch c {\n\t\t\t\tcase '_': strings.write_rune(&b, ' ')\n\t\t\t\tcase:     strings.write_rune(&b, c)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t_status_strings[field] = strings.to_string(b)\n\t}\n}\n\nstatus_string :: proc(s: Status) -> string {\n\tif s >= Status(0) && s <= max(Status) {\n\t\treturn _status_strings[s]\n\t}\n\n\treturn \"\"\n}\n\nstatus_valid :: proc(s: Status) -> bool {\n\treturn status_string(s) != \"\"\n}\n\nstatus_from_string :: proc(s: string) -> (Status, bool) {\n\tif len(s) < 3 { return {}, false }\n\n\tcode_int := int(s[0]-'0')*100 + (int(s[1]-'0')*10) + int(s[2]-'0')\n\n\tif !status_valid(Status(code_int)) {\n\t\treturn {}, false\n\t}\n\n\treturn Status(code_int), true\n}\n\nstatus_is_informational :: proc(s: Status) -> bool {\n\treturn s >= Status(100) && s < Status(200)\n}\n\nstatus_is_success :: proc(s: Status) -> bool {\n\treturn s >= Status(200) && s < Status(300)\n}\n\nstatus_is_redirect :: proc(s: Status) -> bool {\n\treturn s >= Status(300) && s < Status(400)\n}\n\nstatus_is_client_error :: proc(s: Status) -> bool {\n\treturn s >= Status(400) && s < Status(500)\n}\n\nstatus_is_server_error :: proc(s: Status) -> bool {\n\treturn s >= Status(500) && s < Status(600)\n}\n"
  }
]