Full Code of laytan/odin-http for AI

main 112c49b5bcee cached
63 files
47.9 MB
98.8k tokens
4 symbols
1 requests
Download .txt
Showing preview only (348K chars total). Download the full file or copy to clipboard to get everything.
Repository: laytan/odin-http
Branch: main
Commit: 112c49b5bcee
Files: 63
Total size: 47.9 MB

Directory structure:
gitextract_m0_ssysi/

├── .editorconfig
├── .github/
│   └── workflows/
│       ├── ci.yml
│       ├── docs.yml
│       └── openssl.yml
├── .gitignore
├── LICENSE
├── README.md
├── allocator.odin
├── body.odin
├── client/
│   ├── client.odin
│   └── communication.odin
├── comparisons/
│   └── empty-ok-all/
│       ├── README.md
│       ├── bun/
│       │   ├── .gitignore
│       │   ├── bun.lockb
│       │   ├── index.ts
│       │   ├── package.json
│       │   └── tsconfig.json
│       ├── go/
│       │   └── main.go
│       ├── node/
│       │   └── app.js
│       ├── odin/
│       │   └── main.odin
│       └── rust/
│           ├── .gitignore
│           ├── Cargo.toml
│           └── src/
│               └── main.rs
├── cookie.odin
├── docs/
│   ├── all.odin
│   ├── generate.sh
│   └── odin-doc.json
├── examples/
│   ├── client/
│   │   └── main.odin
│   └── tcp_echo/
│       └── main.odin
├── handlers.odin
├── headers.odin
├── http.odin
├── mimes.odin
├── mod.pkg
├── odinfmt.json
├── old_nbio/
│   ├── README.md
│   ├── _io_uring/
│   │   ├── os.odin
│   │   └── sys.odin
│   ├── doc.odin
│   ├── nbio.odin
│   ├── nbio_darwin.odin
│   ├── nbio_internal_darwin.odin
│   ├── nbio_internal_linux.odin
│   ├── nbio_internal_windows.odin
│   ├── nbio_linux.odin
│   ├── nbio_test.odin
│   ├── nbio_unix.odin
│   ├── nbio_windows.odin
│   ├── poly/
│   │   └── poly.odin
│   └── pool.odin
├── openssl/
│   ├── .version
│   ├── includes/
│   │   └── windows/
│   │       ├── libcrypto.lib
│   │       ├── libcrypto_static.lib
│   │       ├── libssl.lib
│   │       └── libssl_static.lib
│   └── openssl.odin
├── request.odin
├── response.odin
├── responses.odin
├── routing.odin
├── scanner.odin
├── server.odin
└── status.odin

================================================
FILE CONTENTS
================================================

================================================
FILE: .editorconfig
================================================
# Top-most EditorConfig file; editors stop searching parent directories.
root = true

# Default for all files: tabs, Unix line endings.
[*]
end_of_line = lf
insert_final_newline = true
indent_style = tab
indent_size = 4
trim_trailing_whitespace = true

# YAML forbids tabs for indentation; use 2 spaces for workflow files.
[*.yml]
indent_style = space
indent_size = 2


================================================
FILE: .github/workflows/ci.yml
================================================
name: CI
on:
  push:
  workflow_dispatch:
  # Nightly run to catch breakage against the latest Odin nightly build.
  schedule:
    - cron: 0 20 * * *

env:
  # Force colored output in CI logs.
  FORCE_COLOR: "1"

jobs:
  check:
    strategy:
      # Keep the other OS jobs running even when one of them fails.
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest, macos-15-intel]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - uses: laytan/setup-odin@v2
        with:
          release: nightly
      - name: Report
        run: odin report
      - name: Run client example
        run: odin run examples/client
        timeout-minutes: 1
      - name: Odin check
        # Run even if the previous step failed so check results are always reported.
        if: success() || failure()
        # NOTE(review): `examples/complete` does not appear in this repository's
        # directory listing — confirm the path exists or this step will fail.
        run: odin check examples/complete -vet --strict-style && odin check examples/client -vet --strict-style
        timeout-minutes: 1


================================================
FILE: .github/workflows/docs.yml
================================================
name: Deploy docs to GitHub pages

on:
  push:
    branches: [main]
  workflow_dispatch:

env:
  # Force colored output in CI logs.
  FORCE_COLOR: "1"

# Minimum permissions needed to deploy to GitHub Pages via OIDC.
permissions:
  contents: read
  pages: write
  id-token: write

# Only one Pages deployment at a time; newer runs cancel older ones.
concurrency:
  group: "pages"
  cancel-in-progress: true

jobs:
  docs:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    steps:
      - uses: laytan/setup-odin@v2
        with:
          release: nightly
      - name: Report
        run: odin report
      # libcmark is a build dependency of the Odin docs generator below.
      - name: Get commonmark
        run: sudo apt-get install libcmark-dev
      - name: Get and build Odin docs generator
        run: |
          cd /home/runner
          git clone https://github.com/odin-lang/pkg.odin-lang.org odin-doc
          cd odin-doc
          # The /home/runner/odin directory is in the PATH so output it there.
          odin build . -out:/home/runner/odin/odin-doc
          cd /home/runner
      - uses: actions/checkout@v4
      - name: Generate documentation
        run: ./docs/generate.sh
      - uses: actions/configure-pages@v3
      - uses: actions/upload-pages-artifact@v3
        with:
          path: ./docs/build
      - uses: actions/deploy-pages@v4
        id: deployment


================================================
FILE: .github/workflows/openssl.yml
================================================
# Checks for a new OpenSSL release, builds the Windows libraries from source,
# and opens a PR updating the bundled .lib files and openssl/.version.
name: OpenSSL
on:
  push:
    paths: [".github/workflows/openssl.yml"]
    branches: ["main"]
  workflow_dispatch:
  schedule:
    - cron: 0 20 * * *

env:
  # Force colored output in CI logs.
  FORCE_COLOR: "1"

# Only one update run at a time; newer runs cancel older ones.
concurrency:
  group: "openssl"
  cancel-in-progress: true

jobs:
  check-updates:
    runs-on: windows-latest
    # Every step after the two version lookups is gated on the bundled version
    # differing from the latest upstream release.
    steps:
      - uses: actions/checkout@v4

      # Read the currently bundled version from openssl/.version.
      - id: current-release
        shell: bash
        run: |
          VERSION=$(cat openssl/.version)
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "current version is $VERSION"

      # Look up the latest upstream release tag and its source tarball URL.
      - uses: actions/github-script@v7
        id: latest-release
        with:
          script: |
            const latestRelease = await github.rest.repos.getLatestRelease({
              owner: 'openssl',
              repo: 'openssl',
            });
            core.setOutput('version', latestRelease.data.tag_name);

            const asset = latestRelease.data.assets.find(asset => asset.name.endsWith('.tar.gz'));
            if (asset) {
              core.setOutput('url', asset.browser_download_url);
              core.setOutput('version', latestRelease.data.tag_name);
              core.info('latest version is ' + latestRelease.data.tag_name);
            } else {
              core.setFailed('No .tar.gz asset found in the latest release.');
            }

      - name: update .version
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
        shell: bash
        run: |
          echo "${{ steps.latest-release.outputs.version }}" > openssl/.version

      # MSVC and NASM are required to configure and build OpenSSL on Windows.
      - uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}

      - uses: ilammy/setup-nasm@13cbeb366c45c4379d3478cdcbadd8295feb5028
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}

      - name: download release
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
        shell: bash
        run: |
          curl -L -o openssl.tar.gz ${{ steps.latest-release.outputs.url }}
          file openssl.tar.gz

      - name: unzip release
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
        shell: bash
        run: |
          tar -xzf openssl.tar.gz

      # The tarball extracts into a directory named after the release tag.
      - name: configure
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
        run: |
          cd ${{ steps.latest-release.outputs.version }}
          perl Configure VC-WIN64A-HYBRIDCRT no-legacy no-deprecated no-tls-deprecated-ec no-quic no-uplink --release --api=3.0

      - name: compile
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
        run: |
          cd ${{ steps.latest-release.outputs.version }}
          nmake

      - name: test
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
        run: |
          cd ${{ steps.latest-release.outputs.version }}
          nmake test

      # Replace the bundled libraries with the freshly built ones and clean up.
      - name: copy & clean
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
        shell: bash
        run: |
          rm -rf openssl/includes/windows/*

          cd ${{ steps.latest-release.outputs.version }}
          dir
          cp libcrypto.lib ../openssl/includes/windows
          cp libssl.lib ../openssl/includes/windows
          cp libcrypto_static.lib ../openssl/includes/windows
          cp libssl_static.lib ../openssl/includes/windows

          cd ..
          rm -rf openssl.tar.gz
          rm -rf ${{ steps.latest-release.outputs.version }}

      - name: pr
        if: ${{ steps.current-release.outputs.version != steps.latest-release.outputs.version }}
        uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c
        with:
          title: |
            Update bundled OpenSSL libraries to ${{ steps.latest-release.outputs.version }}
          commit-message: |
            openssl: update bundled libraries to ${{ steps.latest-release.outputs.version }}


================================================
FILE: .gitignore
================================================
*.bin
ols.json
opm
Taskfile.yml
*.exe
docs/build

# Example binaries.
minimal
complete
readme
routing


================================================
FILE: LICENSE
================================================
Copyright (c) 2023 Laytan Laats

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: README.md
================================================
# Odin HTTP

An HTTP/1.1 implementation for Odin purely written in Odin (besides SSL).

See generated package documentation at [odin-http.laytan.dev](https://odin-http.laytan.dev).

See below examples or the examples directory.

## Compatibility

This is beta software, confirmed to work in my own use cases but can certainly contain edge cases and bugs that I did not catch.
Please file issues for any bug or suggestion you encounter/have.

I am usually on a recent master version of Odin and commits will be made with new features if applicable, backwards compatibility or even
stable version compatibility is not currently a thing.

Because this is still heavily in development, I do not hesitate to push API changes at the moment, so beware.

The package has been tested to work with Ubuntu Linux (other "normal" distros should work), macOS (M1 and Intel), and 64-bit Windows.
Any other distributions or versions have not been tested and might not work.

## Dependencies

The *client* package depends on OpenSSL for making HTTPS requests.

This repository contains a copy of these libraries for ease of use on Windows.

For Linux, most distros come with OpenSSL, if not you can install it with a package manager, usually under `libssl3`.

## Performance

Some small benchmarks have been done in the comparisons directory.

My main priority in terms of performance is currently Linux (because most servers end up there in production).

Other targets are still made to be performant, but benchmarking etc. is mostly done on Linux.

## IO implementations

Although these implementation details are not exposed when using the package, these are the underlying kernel API's that are used.

- Windows: [IOCP (IO Completion Ports)](https://en.wikipedia.org/wiki/Input/output_completion_port)
- Linux:   [io_uring](https://en.wikipedia.org/wiki/Io_uring)
- Darwin:  [KQueue](https://en.wikipedia.org/wiki/Kqueue)

The IO part of this package can be used on its own for other types of applications, see the nbio directory for the documentation on that.
It has APIs for reading, writing, opening, closing, seeking files and accepting, connecting, sending, receiving and closing sockets, both UDP and TCP, fully cross-platform.

## Server example

```odin
package main

import "core:fmt"
import "core:log"
import "core:net"
import "core:time"

import http "../.." // Change to path of package.

main :: proc() {
	context.logger = log.create_console_logger(.Info)

	s: http.Server
	// Register a graceful shutdown when the program receives a SIGINT signal.
	http.server_shutdown_on_interrupt(&s)

	// Set up routing
	router: http.Router
	http.router_init(&router)
	defer http.router_destroy(&router)

	// Routes are tried in order.
	// Route matching is implemented using an implementation of Lua patterns, see the docs on them here:
	// https://www.lua.org/pil/20.2.html
	// They are very similar to regex patterns but a bit more limited, which makes them much easier to implement since Odin does not have a regex implementation.

	// Matches /users followed by any word (alphanumeric) followed by /comments and then / with any number.
	// The word is available as req.url_params[0], and the number as req.url_params[1].
	http.route_get(&router, "/users/(%w+)/comments/(%d+)", http.handler(proc(req: ^http.Request, res: ^http.Response) {
		http.respond_plain(res, fmt.tprintf("user %s, comment: %s", req.url_params[0], req.url_params[1]))
	}))
	http.route_get(&router, "/cookies", http.handler(cookies))
	http.route_get(&router, "/api", http.handler(api))
	http.route_get(&router, "/ping", http.handler(ping))
	http.route_get(&router, "/index", http.handler(index))

	// Matches every get request that did not match another route.
	http.route_get(&router, "(.*)", http.handler(static))

	http.route_post(&router, "/ping", http.handler(post_ping))

	routed := http.router_handler(&router)

	log.info("Listening on http://localhost:6969")

	err := http.listen_and_serve(&s, routed, net.Endpoint{address = net.IP4_Loopback, port = 6969})
	fmt.assertf(err == nil, "server stopped with error: %v", err)
}

cookies :: proc(req: ^http.Request, res: ^http.Response) {
	append(
		&res.cookies,
		http.Cookie{
			name         = "Session",
			value        = "123",
			expires_gmt  = time.now(),
			max_age_secs = 10,
			http_only    = true,
			same_site    = .Lax,
		},
	)
	http.respond_plain(res, "Yo!")
}

api :: proc(req: ^http.Request, res: ^http.Response) {
	if err := http.respond_json(res, req.line); err != nil {
		log.errorf("could not respond with JSON: %s", err)
	}
}

ping :: proc(req: ^http.Request, res: ^http.Response) {
	http.respond_plain(res, "pong")
}

index :: proc(req: ^http.Request, res: ^http.Response) {
	http.respond_file(res, "examples/complete/static/index.html")
}

static :: proc(req: ^http.Request, res: ^http.Response) {
	http.respond_dir(res, "/", "examples/complete/static", req.url_params[0])
}

post_ping :: proc(req: ^http.Request, res: ^http.Response) {
	http.body(req, len("ping"), res, proc(res: rawptr, body: http.Body, err: http.Body_Error) {
		res := cast(^http.Response)res

		if err != nil {
			http.respond(res, http.body_error_status(err))
			return
		}

		if body != "ping" {
			http.respond(res, http.Status.Unprocessable_Content)
			return
		}

		http.respond_plain(res, "pong")
	})
}
```

## Client example

```odin
package main

import "core:fmt"

import "../../client"

main :: proc() {
	get()
	post()
}

// basic get request.
get :: proc() {
	res, err := client.get("https://www.google.com/")
	if err != nil {
		fmt.printf("Request failed: %s", err)
		return
	}
	defer client.response_destroy(&res)

	fmt.printf("Status: %s\n", res.status)
	fmt.printf("Headers: %v\n", res.headers)
	fmt.printf("Cookies: %v\n", res.cookies)
	body, allocation, berr := client.response_body(&res)
	if berr != nil {
		fmt.printf("Error retrieving response body: %s", berr)
		return
	}
	defer client.body_destroy(body, allocation)

	fmt.println(body)
}

Post_Body :: struct {
	name:    string,
	message: string,
}

// POST request with JSON.
post :: proc() {
	req: client.Request
	client.request_init(&req, .Post)
	defer client.request_destroy(&req)

	pbody := Post_Body{"Laytan", "Hello, World!"}
	if err := client.with_json(&req, pbody); err != nil {
		fmt.printf("JSON error: %s", err)
		return
	}

	res, err := client.request(&req, "https://webhook.site/YOUR-ID-HERE")
	if err != nil {
		fmt.printf("Request failed: %s", err)
		return
	}
	defer client.response_destroy(&res)

	fmt.printf("Status: %s\n", res.status)
	fmt.printf("Headers: %v\n", res.headers)
	fmt.printf("Cookies: %v\n", res.cookies)

	body, allocation, berr := client.response_body(&res)
	if berr != nil {
		fmt.printf("Error retrieving response body: %s", berr)
		return
	}
	defer client.body_destroy(body, allocation)

	fmt.println(body)
}
```


================================================
FILE: allocator.odin
================================================
#+private
#+build ignore
package http

// NOTE: currently not in use, had a strange crash I can't figure out.

import "core:container/queue"
import "core:log"
import "core:mem"

// Defaults, reassigned when server is set up.
initial_block_cap      := mem.Kilobyte * 256
max_free_blocks_queued := 64

// A lean, growing, block based allocator.
//
// The first block is kept around after a `free_all` and only free'd using `allocator_destroy`,
// so it doesn't have to allocate it each time.
//
// Blocks start at the `initial_block_cap` (configurable) size and double in size after each new block.
//
// The last allocation is saved and can be freed with `free_with_size` or resized without
// taking up a whole new region in the block.
Allocator :: struct {
	parent:     mem.Allocator, // Backing allocator used to allocate the blocks themselves.
	curr:       ^Block,        // Most recently created block; head of the linked block list.
	cap:        int,           // Size used for the next block; doubles after each new block.
	last_alloc: rawptr,        // Most recent allocation, enabling free/in-place resize of it.
}

// A single allocation block: a header directly followed by its data region.
Block :: struct {
	prev:       Maybe(^Block), // Previously active block; nil for the first block.
	size:       int,           // Usable data size (total_size minus the base offset).
	total_size: int,           // Full size of the backing allocation, header included.
	offset:     int,           // Current bump offset into the block (starts past the header).
	data:       [0]byte,       // Zero-sized marker; the data region starts at this address.
}

// Wraps the given `Allocator` in a `mem.Allocator` interface value so it can be
// assigned to `context.allocator` or passed anywhere a generic allocator is expected.
allocator :: proc(a: ^Allocator) -> mem.Allocator {
	result: mem.Allocator
	result.procedure = allocator_proc
	result.data      = a
	return result
}

// Initializes the allocator, allocating its first block from `parent`.
// Must be called before any allocation is made through it.
allocator_init :: proc(a: ^Allocator, parent := context.allocator, loc := #caller_location) -> mem.Allocator_Error {
	a.parent = parent
	a.cap = initial_block_cap
	a.curr = allocator_new_block(a, 0, 0, loc) or_return
	return nil
}

// The `mem.Allocator` procedure backing `allocator`; dispatches each allocator
// mode to the helpers below. `.Free` only works for the most recent allocation.
allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
                             size, alignment: int,
                             old_memory: rawptr, old_size: int,
                             loc := #caller_location) -> (bytes: []byte, err: mem.Allocator_Error) {

	a := (^Allocator)(allocator_data)
	switch mode {
	case .Alloc:
		return allocator_alloc_zerod(a, size, alignment, loc)

	case .Alloc_Non_Zeroed:
		return allocator_alloc_non_zerod(a, size, alignment, loc)

	case .Free:
		// We can only free if this was the last allocation done.
		if old_memory == a.last_alloc {
			a.curr.offset -= old_size
			a.last_alloc = nil
			return nil, nil
		}

		return nil, .Mode_Not_Implemented

	case .Free_All:
		allocator_free_all(a, loc)
		return

	case .Resize, .Resize_Non_Zeroed:
		// Shrink, if it was the last alloc also decrease from block offset.
		if old_size >= size {
			if a.last_alloc == old_memory {
				a.curr.offset -= old_size - size
			}

			return mem.byte_slice(old_memory, size), nil
		}

		// If this was the last alloc, and we have space in it's block, keep same spot and just
		// increase the offset.
		if a.last_alloc == old_memory {
			needed := size - old_size
			got    := a.curr.size - a.curr.offset
			if needed <= got {
				a.curr.offset += needed
				return mem.byte_slice(old_memory, size), nil
			}
		}

		// Resize with older than last allocation or doesn't fit in block, need to allocate new mem.
		bytes = allocator_alloc_non_zerod(a, size, alignment, loc) or_return
		copy(bytes, mem.byte_slice(old_memory, old_size))
		return

	case .Query_Features:
		set := (^mem.Allocator_Mode_Set)(old_memory)
		if set != nil {
			// NOTE(review): `.Free` is partially supported (last allocation only) but is
			// not advertised here — confirm whether that omission is intentional.
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free_All, .Resize, .Query_Features}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented

	case: unreachable()
	}
}

// Creates a block that can hold at least `min_size` bytes, makes it the current
// block, and doubles `a.cap` for the next block. Reuses a same-sized block from
// the thread-local free list when one is available.
allocator_new_block :: proc(a: ^Allocator, min_size: int, alignment: int, loc := #caller_location) -> (b: ^Block, err: mem.Allocator_Error) {
	// Reserve room for the header, or for the alignment when that is larger.
	base_offset := max(alignment, size_of(Block))
	total       := max(a.cap, min_size + base_offset)
	a.cap       *= 2

	// Blocks are bucketed by exact total size, so a popped block is always a fit.
	assert_has_td(loc)
	if bucket, has_bucket := &td.free_temp_blocks[total]; has_bucket {
		if block, has_block := queue.pop_back_safe(bucket); has_block {
			b = block
			td.free_temp_blocks_count -= 1
		}
	}

	if b == nil {
		data := mem.alloc(total, max(16, align_of(Block)), a.parent, loc) or_return
		b     = (^Block)(data)
	}

	b.total_size = total
	b.size       = total - base_offset
	b.offset     = base_offset
	b.prev       = a.curr
	a.curr       = b
	return
}

// Allocates `size` bytes at `alignment` and zeroes the returned memory.
allocator_alloc_zerod :: proc(a: ^Allocator, size: int, alignment: int, loc := #caller_location) -> (bytes: []byte, err: mem.Allocator_Error) {
	result, aerr := allocator_alloc_non_zerod(a, size, alignment, loc)
	mem.zero_slice(result)
	bytes = result
	err   = aerr
	return
}

// Bump-allocates `size` bytes at `alignment` from the current block, creating a
// new block when the current one has insufficient space. Memory is NOT zeroed.
allocator_alloc_non_zerod :: proc(a: ^Allocator, size: int, alignment: int, loc := #caller_location) -> (bytes: []byte, err: mem.Allocator_Error) {
	if size == 0 { return }

	block := a.curr
	// NOTE(review): `data` is formed from `block` before the nil assert below —
	// confirm this cannot fault when the allocator is uninitialized.
	data := ([^]byte)(&block.data)

	assert(block != nil, "you must initialize the allocator first", loc)
	assert(alignment & (alignment-1) == 0, "non-power of two alignment", loc)

	// TODO: handle int overflows.

	needed := int(mem.align_forward_uint(uint(size), uint(alignment)))
	if block.offset + needed > block.size {
		block = allocator_new_block(a, needed, alignment, loc) or_return
		data  = ([^]byte)(&block.data)
	}

	// Advance past any bytes needed to align the current offset's address.
	// NOTE(review): `needed` does not include this alignment_offset, so a block
	// sized exactly for `needed` could overflow by up to alignment-1 bytes —
	// confirm whether `allocator_new_block`'s base_offset guarantees alignment.
	alignment_offset := 0; {
		ptr  := uintptr(data[block.offset:])
		mask := uintptr(alignment-1)
		if ptr & mask != 0 {
			alignment_offset = int(uintptr(alignment) - (ptr & mask))
		}
	}

	block.offset += alignment_offset
	bytes = data[block.offset:][:size]
	block.offset += size
	a.last_alloc = raw_data(bytes)
	return
}

// Frees all blocks except the first (which is kept for reuse and reset), and
// returns statistics about the blocks that were in use.
allocator_free_all :: proc(a: ^Allocator, loc := #caller_location) -> (blocks: int, total_size: int, total_used: int) {
	// NOTE(review): the current (newest) block is counted here and then counted
	// again by the first loop iteration below when more than one block exists,
	// while the oldest retained block is never counted by the loop — confirm
	// whether the statistics are intended to work this way.
	blocks += 1
	total_size += a.curr.size + size_of(Block)
	total_used += a.curr.offset

	// Walk the chain down to the first block, freeing every block along the way.
	for a.curr.prev != nil {
		block      := a.curr
		blocks     += 1
		total_size += block.total_size
		total_used += block.offset
		a.curr      = block.prev.?
		allocator_free_block(a, block, loc)
	}

	// Keep the first block but reset it, and restart the cap doubling sequence.
	a.curr.offset = 0
	a.cap = initial_block_cap
	return
}

// Fully destroys the allocator, releasing every block including the first one
// that `allocator_free_all` keeps around for reuse.
allocator_destroy :: proc(a: ^Allocator, loc := #caller_location) {
	allocator_free_all(a, loc)
	allocator_free_block(a, a.curr, loc)
}

// Returns a block to the thread-local free list for later reuse, or frees it
// outright once `max_free_blocks_queued` blocks are already queued.
allocator_free_block :: proc(a: ^Allocator, b: ^Block, loc := #caller_location) {
	assert_has_td(loc)

	if td.free_temp_blocks_count > max_free_blocks_queued {
		free(b, a.parent)
		log.debug("max temp blocks reached, freeing the block")
		return
	}

	// Buckets are keyed by exact total size so reuse is always an exact fit;
	// lazily initialize the bucket's queue on first use.
	bucket, is_initialized := &td.free_temp_blocks[b.total_size]
	if !is_initialized {
		td.free_temp_blocks[b.total_size] = {}
		bucket = &td.free_temp_blocks[b.total_size]
		queue.init(bucket, max_free_blocks_queued, allocator=td.free_temp_blocks.allocator)
	}

	// Detach from the chain before queueing so reuse starts from a clean state.
	b.prev = nil
	queue.push(bucket, b)
	td.free_temp_blocks_count += 1
}

import "core:testing"

// Regression test: an aligned allocation close to the end of a block must not
// overflow the block; it should spill into a new block without error.
@(test)
test_allocator_alignment_boundary :: proc(t: ^testing.T) {
	arena: Allocator
	allocator_init(&arena)
	context.allocator = allocator(&arena)

	// Fill the first block to near capacity, then request an aligned allocation.
	_, _ = mem.alloc(int(arena.cap)-120)
	_, err := mem.alloc(112, 32)
	testing.expect_value(t, err, nil)
}

// Regression test: a single allocation much larger than the initial block cap
// (a big map reservation) must succeed.
@(test)
test_temp_allocator_big_alloc_and_alignment :: proc(t: ^testing.T) {
	arena: Allocator
	allocator_init(&arena)
	context.allocator = allocator(&arena)

	mappy: map[[8]int]int
	err := reserve(&mappy, 50000)
	testing.expect_value(t, err, nil)
}

// Regression test: the slice returned by an allocation must have exactly the
// requested length, even when alignment padding was added internally.
@(test)
test_temp_allocator_returns_correct_size :: proc(t: ^testing.T) {
	arena: Allocator
	allocator_init(&arena)
	context.allocator = allocator(&arena)

	bytes, err := mem.alloc_bytes(10, 16)
	testing.expect_value(t, err, nil)
	testing.expect_value(t, len(bytes), 10)
}


================================================
FILE: body.odin
================================================
package http

import "core:bufio"
import "core:io"
import "core:log"
import "core:net"
import "core:strconv"
import "core:strings"

Body :: string

Body_Callback :: #type proc(user_data: rawptr, body: Body, err: Body_Error)

Body_Error :: bufio.Scanner_Error

/*
Retrieves the request's body.

If the request has the chunked Transfer-Encoding header set, the chunks are all read and returned.
Otherwise, the Content-Length header is used to determine what to read and return it.

`max_length` can be used to set a maximum amount of bytes we try to read, once it goes over this,
an error is returned.

Do not call this more than once.

**Tip** If an error is returned, easily respond with an appropriate error code like this, `http.respond(res, http.body_error_status(err))`.
*/
body :: proc(req: ^Request, max_length: int = -1, user_data: rawptr, cb: Body_Callback) {
	assert(req._body_ok == nil, "you can only call body once per request")

	// A body is chunked when its Transfer-Encoding header ends in "chunked".
	is_chunked: bool
	if enc_header, has_enc := headers_get_unsafe(req.headers, "transfer-encoding"); has_enc {
		is_chunked = strings.has_suffix(enc_header, "chunked")
	}

	if is_chunked {
		_body_chunked(req, max_length, user_data, cb)
	} else {
		_body_length(req, max_length, user_data, cb)
	}
}

/*
Parses a URL encoded body, aka bodies with the 'Content-Type: application/x-www-form-urlencoded'.

Key&value pairs are percent decoded and put in a map.
*/
body_url_encoded :: proc(plain: Body, allocator := context.temp_allocator) -> (res: map[string]string, ok: bool) {

	// Inserts the pair plain[keys:..] = plain[vals:end] into `m`, percent-decoding
	// key and value only when they actually contain a '%'. `vals == -1` means the
	// pair had no '=', in which case the value is the empty string.
	insert :: proc(m: ^map[string]string, plain: string, keys: int, vals: int, end: int, allocator := context.temp_allocator) -> bool {
		has_value := vals != -1
		key_end   := vals - 1 if has_value else end // vals points just past '='; key ends before it.
		key       := plain[keys:key_end]
		val       := plain[vals:end] if has_value else ""

		// PERF: this could be a hot spot and I don't like that we allocate the decoded key and value here.
		keye := (net.percent_decode(key, allocator) or_return) if strings.index_byte(key, '%') > -1 else key
		vale := (net.percent_decode(val, allocator) or_return) if has_value && strings.index_byte(val, '%') > -1 else val

		m[keye] = vale
		return true
	}

	// Pre-size the map by counting pair separators.
	count := 1
	for b in plain {
		if b == '&' { count += 1 }
	}

	queries := make(map[string]string, count, allocator)

	// Single pass tracking the start of the current key and value.
	keys := 0
	vals := -1
	for b, i in plain {
		switch b {
		case '=':
			vals = i + 1
		case '&':
			insert(&queries, plain, keys, vals, i) or_return
			keys = i + 1
			vals = -1
		}
	}

	// The last pair has no trailing '&'; flush it explicitly.
	insert(&queries, plain, keys, vals, len(plain)) or_return

	return queries, true
}

// Returns an appropriate status code for the given body error.
// Client-caused read problems map to 4xx, internal scanner/IO faults to 500,
// and the "no error" variants map to 200 OK.
body_error_status :: proc(e: Body_Error) -> Status {
	switch t in e {
	case bufio.Scanner_Extra_Error:
		switch t {
		case .Too_Long:                            return .Payload_Too_Large
		case .Too_Short, .Bad_Read_Count:          return .Bad_Request
		case .Negative_Advance, .Advanced_Too_Far: return .Internal_Server_Error
		case .None:                                return .OK
		case:
			return .Internal_Server_Error
		}
	case io.Error:
		switch t {
		case .EOF, .Unknown, .No_Progress, .Unexpected_EOF:
			return .Bad_Request
		case .Empty, .Short_Write, .Buffer_Full, .Short_Buffer,
		     .Invalid_Write, .Negative_Read, .Invalid_Whence, .Invalid_Offset,
		     .Invalid_Unread, .Negative_Write, .Negative_Count,
		     .Permission_Denied, .No_Size, .Closed:
			return .Internal_Server_Error
		case .None:
			return .OK
		case:
			return .Internal_Server_Error
		}
	case: unreachable()
	}
}


// "Decodes" a request body based on the content length header.
// Meant for internal usage, you should use `http.request_body`.
//
// Invokes `cb` with an empty body when the header is absent or declares zero
// bytes, with an error when the header is invalid or exceeds `max_length`, and
// otherwise schedules a scan of exactly `ilen` bytes.
_body_length :: proc(req: ^Request, max_length: int = -1, user_data: rawptr, cb: Body_Callback) {
	req._body_ok = false

	len, ok := headers_get_unsafe(req.headers, "content-length")
	if !ok {
		// No Content-Length header: treat as an empty body.
		// NOTE(review): `_body_ok` stays false on this path, unlike the
		// explicit `ilen == 0` path below — confirm that is intended.
		cb(user_data, "", nil)
		return
	}

	ilen, lenok := strconv.parse_int(len, 10)
	if !lenok {
		cb(user_data, "", .Bad_Read_Count)
		return
	}

	if max_length > -1 && ilen > max_length {
		cb(user_data, "", .Too_Long)
		return
	}

	if ilen == 0 {
		req._body_ok = true
		cb(user_data, "", nil)
		return
	}

	// Configure the scanner to produce exactly `ilen` bytes as one token.
	req._scanner.max_token_size = ilen

	req._scanner.split          = scan_num_bytes
	req._scanner.split_data     = rawptr(uintptr(ilen))

	req._body_ok = true
	scanner_scan(req._scanner, user_data, cb)
}

/*
"Decodes" a chunked transfer encoded request body.
Meant for internal usage, you should use `http.request_body`.

PERF: this could be made non-allocating by writing over the part of the body that contains the
metadata with the rest of the body, and then returning a slice of that, but it is some effort and
I don't think this functionality of HTTP is used that much anyway.

RFC 7230 4.1.3 pseudo-code:

length := 0
read chunk-size, chunk-ext (if any), and CRLF
while (chunk-size > 0) {
   read chunk-data and CRLF
   append chunk-data to decoded-body
   length := length + chunk-size
   read chunk-size, chunk-ext (if any), and CRLF
}
read trailer field
while (trailer field is not empty) {
   if (trailer field is allowed to be sent in a trailer) {
   	append trailer field to existing header fields
   }
   read trailer-field
}
Content-Length := length
Remove "chunked" from Transfer-Encoding
Remove Trailer from existing header fields
*/
_body_chunked :: proc(req: ^Request, max_length: int = -1, user_data: rawptr, cb: Body_Callback) {
	req._body_ok = false

	// Receives a chunk-size line; parses the hex size and either schedules the
	// chunk-data read, or (size 0) moves on to the trailer headers.
	on_scan :: proc(s: rawptr, size_line: string, err: bufio.Scanner_Error) {
		s := cast(^Chunked_State)s
		size_line := size_line

		if err != nil {
			s.cb(s.user_data, "", err)
			return
		}

		// If there is a semicolon, discard everything after it,
		// that would be chunk extensions which we currently have no interest in.
		if semi := strings.index_byte(size_line, ';'); semi > -1 {
			size_line = size_line[:semi]
		}

		size, ok := strconv.parse_int(string(size_line), 16)
		if !ok {
			log.infof("Encountered an invalid chunk size when decoding a chunked body: %q", string(size_line))
			s.cb(s.user_data, "", .Bad_Read_Count)
			return
		}

		// start scanning trailer headers.
		if size == 0 {
			scanner_scan(s.req._scanner, s, on_scan_trailer)
			return
		}

		// Enforce the caller's size cap against the accumulated body plus this chunk.
		if s.max_length > -1 && strings.builder_len(s.buf) + size > s.max_length {
			s.cb(s.user_data, "", .Too_Long)
			return
		}

		// Configure the scanner to produce exactly `size` bytes as one token.
		s.req._scanner.max_token_size = size

		s.req._scanner.split          = scan_num_bytes
		s.req._scanner.split_data     = rawptr(uintptr(size))

		scanner_scan(s.req._scanner, s, on_scan_chunk)
	}

	// Receives a chunk's data; appends it to the body buffer, then consumes the
	// CRLF that terminates the chunk before looping back to the next size line.
	on_scan_chunk :: proc(s: rawptr, token: string, err: bufio.Scanner_Error) {
		s := cast(^Chunked_State)s

		if err != nil {
			s.cb(s.user_data, "", err)
			return
		}

		// Restore line-based scanning for the trailing CRLF and next size line.
		s.req._scanner.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
		s.req._scanner.split          = scan_lines

		strings.write_string(&s.buf, token)

		// Consumes the empty line after the chunk data, then continues with
		// the next chunk-size line.
		on_scan_empty_line :: proc(s: rawptr, token: string, err: bufio.Scanner_Error) {
			s := cast(^Chunked_State)s

			if err != nil {
				s.cb(s.user_data, "", err)
				return
			}
			assert(len(token) == 0)

			scanner_scan(s.req._scanner, s, on_scan)
		}

		scanner_scan(s.req._scanner, s, on_scan_empty_line)
	}

	// Receives trailer header lines until an empty line (or error) marks the end,
	// then finalizes the headers per RFC 7230 4.1.3 and delivers the body.
	on_scan_trailer :: proc(s: rawptr, line: string, err: bufio.Scanner_Error) {
		s := cast(^Chunked_State)s

		// Headers are done, success.
		if err != nil || len(line) == 0 {
			headers_delete_unsafe(&s.req.headers, "trailer")

			// NOTE(review): `headers_get_unsafe` is called elsewhere with two return
			// values; here only one is captured — presumably a proc-group variant,
			// confirm against headers.odin.
			te_header := headers_get_unsafe(s.req.headers, "transfer-encoding")
			new_te_header := strings.trim_suffix(te_header, "chunked")

			// Headers are normally read-only by this point; briefly unlock to fix up
			// the Transfer-Encoding value.
			s.req.headers.readonly = false
			headers_set_unsafe(&s.req.headers, "transfer-encoding", new_te_header)
			s.req.headers.readonly = true

			s.req._body_ok = true
			s.cb(s.user_data, strings.to_string(s.buf), nil)
			return
		}

		key, ok := header_parse(&s.req.headers, string(line))
		if !ok {
			log.infof("Invalid header when decoding chunked body: %q", string(line))
			s.cb(s.user_data, "", .Unknown)
			return
		}

		// A recipient MUST ignore (or consider as an error) any fields that are forbidden to be sent in a trailer.
		if !header_allowed_trailer(key) {
			log.infof("Invalid trailer header received, discarding it: %q", key)
			headers_delete(&s.req.headers, key)
		}

		scanner_scan(s.req._scanner, s, on_scan_trailer)
	}

	// State threaded through the callback chain above.
	Chunked_State :: struct {
		req:        ^Request,
		max_length: int,
		user_data:  rawptr,
		cb:         Body_Callback,

		buf:        strings.Builder, // Accumulates the decoded chunk data.
	}

	// Temp-allocated: lives only for the duration of this request.
	s := new(Chunked_State, context.temp_allocator)

	s.buf.buf.allocator = context.temp_allocator

	s.req        = req
	s.max_length = max_length
	s.user_data  = user_data
	s.cb         = cb

	// Kick off by scanning the first chunk-size line.
	s.req._scanner.split = scan_lines
	scanner_scan(s.req._scanner, s, on_scan)
}


================================================
FILE: client/client.odin
================================================
// package provides a very simple (for now) HTTP/1.1 client.
package client

import "core:bufio"
import "core:bytes"
import "core:c"
import "core:encoding/json"
import "core:io"
import "core:log"
import "core:net"
import "core:strconv"
import "core:strings"

import http ".."
import openssl "../openssl"

// A client HTTP request: the method, headers and cookies to send, plus the body buffer.
// Initialize with `request_init`, free with `request_destroy`.
Request :: struct {
	method:  http.Method,          // Defaults to .Get in `request_init`.
	headers: http.Headers,         // Initialized by `request_init`.
	cookies: [dynamic]http.Cookie, // Serialized into a single `cookie` header by `format_request`.
	body:    bytes.Buffer,         // Request body; written by `with_json` or directly by the user.
}

// Initializes the request with sane defaults using the given allocator.
request_init :: proc(r: ^Request, method := http.Method.Get, allocator := context.allocator) {
	// Every container owned by the request is tied to the same allocator.
	http.headers_init(&r.headers, allocator)
	bytes.buffer_init_allocator(&r.body, 0, 0, allocator)
	r.cookies = make([dynamic]http.Cookie, allocator)
	r.method  = method
}

// Destroys the request.
// Header keys and values that the user added will have to be deleted by the user.
// Same with any strings inside the cookies.
request_destroy :: proc(r: ^Request) {
	bytes.buffer_destroy(&r.body)
	delete(r.cookies)
	delete(r.headers._kv)
}

// Marshals `v` as JSON into the request body and sets the content-type header.
// A .Get method is switched to .Post (presumably because a body is being attached).
with_json :: proc(r: ^Request, v: any, opt: json.Marshal_Options = {}) -> json.Marshal_Error {
	opt := opt

	if r.method == .Get {
		r.method = .Post
	}

	http.headers_set_content_type(&r.headers, http.mime_to_content_type(.Json))

	w := io.to_writer(bytes.buffer_to_stream(&r.body))
	return json.marshal_to_writer(w, v, &opt)
}

// Performs a GET request to `target` using a default-initialized request.
get :: proc(target: string, allocator := context.allocator) -> (Response, Error) {
	req: Request
	request_init(&req, .Get, allocator)
	defer request_destroy(&req)

	return request(&req, target, allocator)
}

// Errors that can occur while parsing the server's response.
Request_Error :: enum {
	Ok,
	Invalid_Response_HTTP_Version,
	Invalid_Response_Method,
	Invalid_Response_Header,
	Invalid_Response_Cookie,
}

// Errors surfaced from the OpenSSL layer during an HTTPS request.
SSL_Error :: enum {
	Ok,
	Controlled_Shutdown, // SSL_connect reported a controlled shutdown of the connection.
	Fatal_Shutdown,      // SSL_connect failed fatally.
	SSL_Write_Failed,    // SSL_write returned an error while sending the request.
}

// Any error the client can return; nil (all members zero) means success.
Error :: union #shared_nil {
	net.Dial_Error,
	net.Parse_Endpoint_Error,
	net.Network_Error,
	net.TCP_Send_Error,
	bufio.Scanner_Error,
	Request_Error,
	SSL_Error,
}

// Performs the request to `target` and returns the parsed response.
//
// Connects over plain TCP for http targets and over TLS (OpenSSL) for https targets.
// NOTE: connections are not reused; a `connection: close` header is always sent.
request :: proc(request: ^Request, target: string, allocator := context.allocator) -> (res: Response, err: Error) {
	url, endpoint := parse_endpoint(target) or_return

	// NOTE: we don't support persistent connections yet.
	http.headers_set_close(&request.headers)

	req_buf := format_request(url, request, allocator)
	defer bytes.buffer_destroy(&req_buf)

	socket := net.dial_tcp(endpoint) or_return

	// HTTPS using openssl.
	if url.scheme == "https" {
		ctx := openssl.SSL_CTX_new(openssl.TLS_client_method())
		ssl := openssl.SSL_new(ctx)
		openssl.SSL_set_fd(ssl, c.int(socket))

		// For servers using SNI for SSL certs (like cloudflare), this needs to be set.
		chostname := strings.clone_to_cstring(url.host, allocator)
		defer delete(chostname, allocator)
		openssl.SSL_set_tlsext_host_name(ssl, chostname)

		// SSL_connect returns 1 on success, 0 when the handshake was shut down
		// in a controlled way, and a negative value on a fatal error.
		switch openssl.SSL_connect(ssl) {
		case 0:
			err = SSL_Error.Controlled_Shutdown
			return
		case 1: // success
		case:
			err = SSL_Error.Fatal_Shutdown
			return
		}

		// Loop until the whole request is written, advancing past what each
		// SSL_write call reported as written (partial writes must not resend
		// the start of the buffer).
		buf := bytes.buffer_to_bytes(&req_buf)
		for len(buf) > 0 {
			ret := openssl.SSL_write(ssl, raw_data(buf), c.int(len(buf)))
			if ret <= 0 {
				err = SSL_Error.SSL_Write_Failed
				return
			}

			buf = buf[ret:]
		}

		return parse_response(SSL_Communication{ssl = ssl, ctx = ctx, socket = socket}, allocator)
	}

	// HTTP, just send the request.
	net.send_tcp(socket, bytes.buffer_to_bytes(&req_buf)) or_return
	return parse_response(socket, allocator)
}

// A parsed response from the server.
Response :: struct {
	status:    http.Status,
	// headers and cookies should be considered read-only, after a response is returned.
	headers:   http.Headers,
	cookies:   [dynamic]http.Cookie,
	_socket:   Communication, // The underlying connection; closed by `response_destroy`.
	_body:     bufio.Scanner, // Scanner over the connection, positioned at the start of the body.
	_body_err: Body_Error,    // Set once `response_body` has been called.
}

// Frees the response, closes the connection.
// Optionally pass the response_body returned 'body' and 'was_allocation' to destroy it too.
response_destroy :: proc(res: ^Response, body: Maybe(Body_Type) = nil, was_allocation := false, body_allocator := context.allocator) {
	// Header keys are allocated, values are slices into the body.
	// NOTE: this is fine because we don't add any headers with `headers_set_unsafe()`.
	// If we did, we wouldn't know if the key was allocated or a literal.
	// We also set the headers to readonly before giving them to the user so they can't add any either.
	for k, v in res.headers._kv {
		delete(v, res.headers._kv.allocator)
		delete(k, res.headers._kv.allocator)
	}

	delete(res.headers._kv)

	bufio.scanner_destroy(&res._body)

	// The parsed cookie fields are slices into each cookie's _raw string.
	for cookie in res.cookies {
		delete(cookie._raw)
	}
	delete(res.cookies)

	if body != nil {
		body_destroy(body.(Body_Type), was_allocation, body_allocator)
	}

	// We close now and not at the time we got the response because reading the body,
	// could make more reads need to happen (like with chunked encoding).
	switch comm in res._socket {
	case net.TCP_Socket:
		net.close(comm)
	case SSL_Communication:
		// Free the SSL object and its context before closing the underlying socket.
		openssl.SSL_free(comm.ssl)
		openssl.SSL_CTX_free(comm.ctx)
		net.close(comm.socket)
	}
}

// Errors that can occur while reading/parsing a response body.
Body_Error :: enum {
	None,
	No_Length,
	Invalid_Length,         // The content-length header could not be parsed as an int.
	Too_Long,               // Body exceeds the given max_length.
	Scan_Failed,            // Reading from the connection failed.
	Invalid_Chunk_Size,     // Chunked encoding: a chunk-size line was not valid hex.
	Invalid_Trailer_Header, // Chunked encoding: a trailer header failed to parse.
}

// Any non-special body, could have been a chunked body that has been read in fully automatically.
// Depending on the return value for 'was_allocation' of the parse function, this is either an
// allocated string that you should delete or a slice into the body.
Body_Plain :: string

// A URL encoded body, map, keys and values are fully allocated on the allocator given to the parsing function,
// And should be deleted by you.
Body_Url_Encoded :: map[string]string

// The result of parsing a body; destroy with `body_destroy`.
Body_Type :: union #no_nil {
	Body_Plain,
	Body_Url_Encoded,
	Body_Error, // TODO: why is this here if we also return an error?
}

// Frees the memory allocated by parsing the body.
// was_allocation is returned by the body parsing procedure.
body_destroy :: proc(body: Body_Type, was_allocation: bool, allocator := context.allocator) {
	switch b in body {
	case Body_Plain:
		// Only an allocation when the parser had to assemble the string.
		if was_allocation {
			delete(b, allocator)
		}
	case Body_Url_Encoded:
		// Keys and values were percent-decoded onto the map's allocator.
		for key, value in b {
			delete(key, b.allocator)
			delete(value, b.allocator)
		}
		delete(b)
	case Body_Error:
		// Nothing to free.
	}
}

// Retrieves the response's body, can only be called once.
// Free the returned body using body_destroy().
response_body :: proc(
	res: ^Response,
	max_length := -1,
	allocator := context.allocator,
) -> (
	body: Body_Type,
	was_allocation: bool,
	err: Body_Error,
) {
	// Calling this twice is a programmer error: the body has already been
	// consumed off the connection, hence the assert on _body_err below.
	defer res._body_err = err
	assert(res._body_err == nil)
	body, was_allocation, err = _parse_body(&res.headers, &res._body, max_length, allocator)
	return
}

// Dispatches to the right body reader based on the headers, then optionally
// decodes url encoded (form) bodies. Meant for internal usage, use `response_body`.
// A `max_length` of -1 means no limit.
_parse_body :: proc(
	headers: ^http.Headers,
	_body: ^bufio.Scanner,
	max_length := -1,
	allocator := context.allocator,
) -> (
	body: Body_Type,
	was_allocation: bool,
	err: Body_Error,
) {
	// See [RFC 7230 3.3.3](https://www.rfc-editor.org/rfc/rfc7230#section-3.3.3) for the rules.
	// Point 3 paragraph 3 and point 4 are handled before we get here.

	enc, has_enc       := http.headers_get_unsafe(headers^, "transfer-encoding")
	length, has_length := http.headers_get_unsafe(headers^, "content-length")
	switch {
	case has_enc && strings.has_suffix(enc, "chunked"):
		// Chunked bodies are assembled into one allocated string.
		was_allocation = true
		body = _response_body_chunked(headers, _body, max_length, allocator) or_return

	case has_length:
		body = _response_body_length(_body, max_length, length) or_return

	case:
		// No length information: the body is everything until the connection closes.
		body = _response_till_close(_body, max_length) or_return
	}

	// Automatically decode url encoded bodies.
	if typ, ok := http.headers_get_unsafe(headers^, "content-type"); ok && typ == "application/x-www-form-urlencoded" {
		plain := body.(Body_Plain)
		defer if was_allocation { delete(plain, allocator) }

		keyvalues := strings.split(plain, "&", allocator)
		defer delete(keyvalues, allocator)

		queries := make(Body_Url_Encoded, len(keyvalues), allocator)
		for keyvalue in keyvalues {
			seperator := strings.index(keyvalue, "=")
			if seperator == -1 { 	// The keyvalue has no value.
				queries[keyvalue] = ""
				continue
			}

			// Pairs that fail percent decoding are logged and skipped, not errors.
			key, key_decoded_ok := net.percent_decode(keyvalue[:seperator], allocator)
			if !key_decoded_ok {
				log.warnf("url encoded body key %q could not be decoded", keyvalue[:seperator])
				continue
			}

			val, val_decoded_ok := net.percent_decode(keyvalue[seperator + 1:], allocator)
			if !val_decoded_ok {
				log.warnf("url encoded body value %q for key %q could not be decoded", keyvalue[seperator + 1:], key)
				continue
			}

			queries[key] = val
		}

		body = queries
	}

	return
}

// Reads the rest of the connection (until EOF/close) as the body.
// Meant for internal usage, you should use `client.response_body`.
//
// NOTE(review): the default `max_length` of -1 is assigned to the scanner's
// max_token_size directly — confirm bufio treats a negative max as unlimited.
_response_till_close :: proc(_body: ^bufio.Scanner, max_length: int) -> (string, Body_Error) {
	_body.max_token_size = max_length
	defer _body.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE

	// Custom split proc: request more data until EOF, then emit everything
	// buffered as one final token.
	_body.split = proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: bufio.Scanner_Error, final_token: bool) {
		if at_eof {
			return len(data), data, nil, true
		}

		return
	}
	defer _body.split = bufio.scan_lines

	if !bufio.scanner_scan(_body) {
		if bufio.scanner_error(_body) == .Too_Long {
			return "", .Too_Long
		}

		return "", .Scan_Failed
	}

	return bufio.scanner_text(_body), .None
}

// "Decodes" a response body based on the content length header.
// Meant for internal usage, you should use `client.response_body`.
_response_body_length :: proc(_body: ^bufio.Scanner, max_length: int, len: string) -> (string, Body_Error) {
	ilen, lenok := strconv.parse_int(len, 10)
	if !lenok {
		return "", .Invalid_Length
	}

	if max_length > -1 && ilen > max_length {
		return "", .Too_Long
	}

	if ilen == 0 {
		return "", nil
	}

	// user_index is used to set the amount of bytes to scan in scan_num_bytes.
	context.user_index = ilen

	_body.max_token_size = ilen
	defer _body.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE

	_body.split = scan_num_bytes
	defer _body.split = bufio.scan_lines

	log.debugf("scanning %i bytes body", ilen)

	if !bufio.scanner_scan(_body) {
		return "", .Scan_Failed
	}

	return bufio.scanner_text(_body), .None
}

// "Decodes" a chunked transfer encoded request body.
// Meant for internal usage, you should use `client.response_body`.
//
// RFC 7230 4.1.3 pseudo-code:
//
// length := 0
// read chunk-size, chunk-ext (if any), and CRLF
// while (chunk-size > 0) {
//    read chunk-data and CRLF
//    append chunk-data to decoded-body
//    length := length + chunk-size
//    read chunk-size, chunk-ext (if any), and CRLF
// }
// read trailer field
// while (trailer field is not empty) {
//    if (trailer field is allowed to be sent in a trailer) {
//    	append trailer field to existing header fields
//    }
//    read trailer-field
// }
// Content-Length := length
// Remove "chunked" from Transfer-Encoding
// Remove Trailer from existing header fields
_response_body_chunked :: proc(
	headers: ^http.Headers,
	_body: ^bufio.Scanner,
	max_length: int,
	allocator := context.allocator,
) -> (
	body: string,
	err: Body_Error,
) {
	body_buff: bytes.Buffer

	bytes.buffer_init_allocator(&body_buff, 0, 0, allocator)
	defer if err != nil { bytes.buffer_destroy(&body_buff) }

	for {
		// Scan the chunk-size line.
		if !bufio.scanner_scan(_body) {
			return "", .Scan_Failed
		}

		size_line := bufio.scanner_bytes(_body)

		// If there is a semicolon, discard everything after it,
		// that would be chunk extensions which we currently have no interest in.
		if semi := bytes.index_byte(size_line, ';'); semi > -1 {
			size_line = size_line[:semi]
		}

		// Chunk sizes are hexadecimal.
		size, ok := strconv.parse_int(string(size_line), 16)
		if !ok {
			err = .Invalid_Chunk_Size
			return
		}
		// A 0-size chunk marks the end of the body.
		if size == 0 { break }

		// max_length of -1 means no limit.
		if max_length > -1 && bytes.buffer_length(&body_buff) + size > max_length {
			return "", .Too_Long
		}

		// user_index is used to set the amount of bytes to scan in scan_num_bytes.
		context.user_index = size

		_body.max_token_size = size
		_body.split = scan_num_bytes

		if !bufio.scanner_scan(_body) {
			return "", .Scan_Failed
		}

		_body.max_token_size = bufio.DEFAULT_MAX_SCAN_TOKEN_SIZE
		_body.split = bufio.scan_lines

		bytes.buffer_write(&body_buff, bufio.scanner_bytes(_body))

		// Read empty line after chunk.
		if !bufio.scanner_scan(_body) {
			return "", .Scan_Failed
		}
		assert(bufio.scanner_text(_body) == "")
	}

	// Read trailing empty line (after body, before trailing headers).
	// NOTE(review): per RFC 7230 4.1 trailer fields follow the last-chunk line
	// directly, so a response that actually sends trailers may not have this
	// empty line first — verify against a real chunked response with trailers.
	if !bufio.scanner_scan(_body) || bufio.scanner_text(_body) != "" {
		return "", .Scan_Failed
	}

	// Keep parsing the request as line delimited headers until we get to an empty line.
	for {
		// If there are no trailing headers, this case is hit.
		if !bufio.scanner_scan(_body) {
			break
		}

		line := bufio.scanner_text(_body)

		// The first empty line denotes the end of the headers section.
		if line == "" {
			break
		}

		key, ok := http.header_parse(headers, line)
		if !ok {
			return "", .Invalid_Trailer_Header
		}

		// A recipient MUST ignore (or consider as an error) any fields that are forbidden to be sent in a trailer.
		if !http.header_allowed_trailer(key) {
			http.headers_delete(headers, key)
		}
	}

	// RFC 7230 4.1.3: remove the trailer header and strip "chunked" from transfer-encoding.
	if http.headers_has_unsafe(headers^, "trailer") {
		http.headers_delete_unsafe(headers, "trailer")
	}

	te := strings.trim_suffix(http.headers_get_unsafe(headers^, "transfer-encoding"), "chunked")

	headers.readonly = false
	http.headers_set_unsafe(headers, "transfer-encoding", te)
	headers.readonly = true

	return bytes.buffer_to_string(&body_buff), .None
}

// A scanner bufio.Split_Proc implementation to scan a given amount of bytes.
// The amount of bytes should be set in the context.user_index.
@(private)
scan_num_bytes :: proc(
	data: []byte,
	at_eof: bool,
) -> (
	advance: int,
	token: []byte,
	err: bufio.Scanner_Error,
	final_token: bool,
) {
	n := context.user_index // Set context.user_index to the amount of bytes to read.

	// Not enough data buffered for a full token yet: returning zero values asks
	// the scanner for more data (or stops at EOF). This single check subsumes
	// the previous separate `at_eof && len(data) < n` branch, which was dead code.
	if len(data) < n {
		return
	}

	return n, data[:n], nil, false
}


================================================
FILE: client/communication.odin
================================================
#+private
package client

import "core:bufio"
import "core:bytes"
import "core:c"
import "core:io"
import "core:log"
import "core:net"
import "core:strconv"
import "core:strings"

import http ".."
import openssl "../openssl"

// Resolves `target` into its parsed URL and a concrete endpoint to dial.
// When the URL carries no port, it defaults to 443 for https and 80 otherwise.
parse_endpoint :: proc(target: string) -> (url: http.URL, endpoint: net.Endpoint, err: net.Network_Error) {
	url = http.url_parse(target)
	host_or_endpoint := net.parse_hostname_or_endpoint(url.host) or_return

	switch t in host_or_endpoint {
	case net.Endpoint:
		// Already a concrete address and port, nothing left to resolve.
		endpoint = t
		return
	case net.Host:
		// Resolve the hostname, preferring the IPv4 result when it exists.
		ep4, ep6 := net.resolve(t.hostname) or_return
		endpoint = ep6 if ep4.address == nil else ep4

		endpoint.port = t.port
		if endpoint.port == 0 {
			endpoint.port = 443 if url.scheme == "https" else 80
		}
		return
	case:
		unreachable()
	}
}

// Serializes the request line, headers, cookies and body into a buffer that is
// ready to be sent over the wire.
format_request :: proc(target: http.URL, request: ^Request, allocator := context.allocator) -> (buf: bytes.Buffer) {
	// Responses are on average at least 100 bytes, so lets start there, but add the body's length.
	bytes.buffer_init_allocator(&buf, 0, bytes.buffer_length(&request.body) + 100, allocator)

	http.requestline_write(
		bytes.buffer_to_stream(&buf),
		{method = request.method, target = target, version = http.Version{1, 1}},
	)

	// Default headers below are only written when the user didn't set them.

	if !http.headers_has_unsafe(request.headers, "content-length") {
		buf_len := bytes.buffer_length(&request.body)
		if buf_len == 0 {
			bytes.buffer_write_string(&buf, "content-length: 0\r\n")
		} else {
			bytes.buffer_write_string(&buf, "content-length: ")

			// Make sure at least 20 bytes are there to write into, should be enough for the content length.
			bytes.buffer_grow(&buf, buf_len + 20)

			// Write the length into unwritten portion.
			unwritten := http._dynamic_unwritten(buf.buf)
			l := len(strconv.write_int(unwritten, i64(buf_len), 10))
			assert(l <= 20)
			http._dynamic_add_len(&buf.buf, l)

			bytes.buffer_write_string(&buf, "\r\n")
		}
	}

	if !http.headers_has_unsafe(request.headers, "accept") {
		bytes.buffer_write_string(&buf, "accept: */*\r\n")
	}

	if !http.headers_has_unsafe(request.headers, "user-agent") {
		bytes.buffer_write_string(&buf, "user-agent: odin-http\r\n")
	}

	if !http.headers_has_unsafe(request.headers, "host") {
		bytes.buffer_write_string(&buf, "host: ")
		bytes.buffer_write_string(&buf, target.host)
		bytes.buffer_write_string(&buf, "\r\n")
	}

	for header, value in request.headers._kv {
		bytes.buffer_write_string(&buf, header)
		bytes.buffer_write_string(&buf, ": ")

		// Escape newlines in headers, if we don't, an attacker can find an endpoint
		// that returns a header with user input, and inject headers into the response.
		esc_value, was_allocation := strings.replace_all(value, "\n", "\\n", allocator)
		defer if was_allocation { delete(esc_value) }

		bytes.buffer_write_string(&buf, esc_value)
		bytes.buffer_write_string(&buf, "\r\n")
	}

	// All cookies are sent in a single `cookie` header, separated by "; ".
	if len(request.cookies) > 0 {
		bytes.buffer_write_string(&buf, "cookie: ")

		for cookie, i in request.cookies {
			bytes.buffer_write_string(&buf, cookie.name)
			bytes.buffer_write_byte(&buf, '=')
			bytes.buffer_write_string(&buf, cookie.value)

			if i != len(request.cookies) - 1 {
				bytes.buffer_write_string(&buf, "; ")
			}
		}

		bytes.buffer_write_string(&buf, "\r\n")
	}

	// Empty line denotes end of headers and start of body.
	bytes.buffer_write_string(&buf, "\r\n")

	bytes.buffer_write(&buf, bytes.buffer_to_bytes(&request.body))
	return
}

// An HTTPS connection: the TCP socket plus the OpenSSL handles layered on top of it.
SSL_Communication :: struct {
	socket: net.TCP_Socket,
	ssl:    ^openssl.SSL,
	ctx:    ^openssl.SSL_CTX,
}

// The underlying connection of a response.
Communication :: union {
	net.TCP_Socket, // HTTP.
	SSL_Communication, // HTTPS.
}

// Reads and parses the response status line, headers and cookies off the connection.
// The body is NOT read here; a scanner positioned at the start of the body is
// stored on the response for `response_body` to consume later.
parse_response :: proc(socket: Communication, allocator := context.allocator) -> (res: Response, err: Error) {
	res._socket = socket

	stream: io.Stream
	switch comm in socket {
	case net.TCP_Socket:
		stream = tcp_stream(comm)
	case SSL_Communication:
		stream = ssl_tcp_stream(comm.ssl)
	}

	stream_reader := io.to_reader(stream)
	scanner: bufio.Scanner
	bufio.scanner_init(&scanner, stream_reader, allocator)

	http.headers_init(&res.headers, allocator)

	if !bufio.scanner_scan(&scanner) {
		err = bufio.scanner_error(&scanner)
		return
	}

	// Parse the status line, e.g. "HTTP/1.1 200 OK".
	rline_str := bufio.scanner_text(&scanner)
	si := strings.index_byte(rline_str, ' ')
	if si < 0 {
		// Malformed status line without a space after the version; bail out
		// instead of slicing with a negative index below.
		err = Request_Error.Invalid_Response_HTTP_Version
		return
	}

	version, ok := http.version_parse(rline_str[:si])
	if !ok {
		err = Request_Error.Invalid_Response_HTTP_Version
		return
	}

	// Might need to support more versions later.
	if version.major != 1 {
		err = Request_Error.Invalid_Response_HTTP_Version
		return
	}

	res.status, ok = http.status_from_string(rline_str[si + 1:])
	if !ok {
		err = Request_Error.Invalid_Response_Method
		return
	}

	res.cookies.allocator = allocator

	// Headers end at the first empty line.
	for {
		if !bufio.scanner_scan(&scanner) {
			err = bufio.scanner_error(&scanner)
			return
		}

		line := bufio.scanner_text(&scanner)
		// Empty line means end of headers.
		if line == "" { break }

		key, hok := http.header_parse(&res.headers, line, allocator)
		if !hok {
			err = Request_Error.Invalid_Response_Header
			return
		}

		// set-cookie headers are moved into the cookies list instead of staying headers.
		if key == "set-cookie" {
			cookie_str := http.headers_get_unsafe(res.headers, "set-cookie")
			http.headers_delete_unsafe(&res.headers, "set-cookie")
			delete(key, allocator)

			cookie, cok := http.cookie_parse(cookie_str, allocator)
			if !cok {
				err = Request_Error.Invalid_Response_Cookie
				delete(cookie_str, allocator)
				return
			}

			append(&res.cookies, cookie)
		}
	}

	if !http.headers_validate(&res.headers) {
		err = Request_Error.Invalid_Response_Header
		return
	}

	res.headers.readonly = true

	res._body = scanner
	return res, nil
}

// Wraps an SSL connection in a (read-only) io.Stream.
ssl_tcp_stream :: proc(sock: ^openssl.SSL) -> (s: io.Stream) {
	return io.Stream{data = sock, procedure = _ssl_stream_proc}
}

// The io.Stream procedure backing `ssl_tcp_stream`; only .Query and .Read are supported.
@(private)
_ssl_stream_proc :: proc(
	stream_data: rawptr,
	mode: io.Stream_Mode,
	p: []byte,
	offset: i64,
	whence: io.Seek_From,
) -> (
	n: i64,
	err: io.Error,
) {
	#partial switch mode {
	case .Query:
		return io.query_utility(io.Stream_Mode_Set{.Query, .Read})
	case .Read:
		ssl := cast(^openssl.SSL)stream_data
		read := openssl.SSL_read(ssl, raw_data(p), c.int(len(p)))
		if read > 0 {
			return i64(read), nil
		}

		// Any SSL_read failure is surfaced as an unexpected EOF.
		return 0, .Unexpected_EOF
	case:
		// Every other stream operation is unsupported.
		return 0, .Empty
	}
}

// Wraps a tcp socket with a stream.
tcp_stream :: proc(sock: net.TCP_Socket) -> (s: io.Stream) {
	// The socket handle is smuggled through the stream's data pointer.
	s = io.Stream{
		data      = rawptr(uintptr(sock)),
		procedure = _socket_stream_proc,
	}
	return
}

// The io.Stream procedure backing `tcp_stream`; only .Query and .Read are supported.
@(private)
_socket_stream_proc :: proc(
	stream_data: rawptr,
	mode: io.Stream_Mode,
	p: []byte,
	offset: i64,
	whence: io.Seek_From,
) -> (
	n: i64,
	err: io.Error,
) {
	#partial switch mode {
	case .Query:
		return io.query_utility(io.Stream_Mode_Set{.Query, .Read})
	case .Read:
		sock := net.TCP_Socket(uintptr(stream_data))
		received, recv_err := net.recv_tcp(sock, p)
		n = i64(received)

		// Map the TCP receive error onto an io.Error; connection-level failures
		// become .Unexpected_EOF, anything unrecognized becomes .Unknown.
		// (The stray `case nil:`/`case:` lines that previously followed this
		// switch were unreachable leftovers and have been removed.)
		#partial switch recv_err {
		case .None:
			err = .None
		case .Network_Unreachable, .Insufficient_Resources, .Invalid_Argument, .Not_Connected, .Connection_Closed, .Timeout, .Would_Block, .Interrupted:
			log.errorf("unexpected error reading tcp: %s", recv_err)
			err = .Unexpected_EOF
		case:
			log.errorf("unexpected error reading tcp: %s", recv_err)
			err = .Unknown
		}
	case:
		// Every other stream operation is unsupported, mirroring _ssl_stream_proc.
		err = .Empty
	}
	return
}


================================================
FILE: comparisons/empty-ok-all/README.md
================================================
# Comparison - Empty OK All

This comparison measures raw IO rate, the server needs to respond to requests on port :8080 with 200 OK.

Of course this is not a full picture but you can get an idea of performance.

## Results

Taken on Pop!_OS Linux using an AMD Ryzen 7 5800X 8-core processor.

Load is created using [Bombardier](https://github.com/codesenberg/bombardier) set to 250 connections and 10.000.000 requests.

Bombardier command used: `bombardier -c 250 -n 10000000 http://localhost:8080`

| Language/framework | Command                                                   | Requests per second | Total time | Avg response time | Throughput |
|--------------------|-----------------------------------------------------------|---------------------|------------|-------------------|------------|
| Rust Actix  4.2    | `cargo build --release` (this installs 256 dependencies!) | 712k                | 14s        | 347us             | 120.8MB/s  |
| Odin-HTTP   dev    | `odin build . -o:speed -disable-assert -no-bounds-check`  | 637k                | 15s        | 340us             | 105.2MB/s  |
| Go net/http 1.21   | `go build main.go`                                        | 598k                | 16s        | 417us             | 77.98MB/s  |
| Bun.serve   1.1    | `NODE_ENV=production bun run index.ts`                    | 302k                | 33s        | 827us             | 39.43MB/s  |
| Node http   20.5   | `NODE_ENV=production node app.js`                         |  65k                | 2m35s      | 3.88ms            | 12.90MB/s  |


================================================
FILE: comparisons/empty-ok-all/bun/.gitignore
================================================
# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore

# Logs

logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)

report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data

pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover

lib-cov

# Coverage directory used by tools like istanbul

coverage
*.lcov

# nyc test coverage

.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)

.grunt

# Bower dependency directory (https://bower.io/)

bower_components

# node-waf configuration

.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)

build/Release

# Dependency directories

node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)

web_modules/

# TypeScript cache

*.tsbuildinfo

# Optional npm cache directory

.npm

# Optional eslint cache

.eslintcache

# Optional stylelint cache

.stylelintcache

# Microbundle cache

.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history

.node_repl_history

# Output of 'npm pack'

*.tgz

# Yarn Integrity file

.yarn-integrity

# dotenv environment variable files

.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# parcel-bundler cache (https://parceljs.org/)

.cache
.parcel-cache

# Next.js build output

.next
out

# Nuxt.js build / generate output

.nuxt
dist

# Gatsby files

.cache/

# Comment in the public line in if your project uses Gatsby and not Next.js

# https://nextjs.org/blog/next-9-1#public-directory-support

# public

# vuepress build output

.vuepress/dist

# vuepress v2.x temp and cache directory

.temp
.cache

# Docusaurus cache and generated files

.docusaurus

# Serverless directories

.serverless/

# FuseBox cache

.fusebox/

# DynamoDB Local files

.dynamodb/

# TernJS port file

.tern-port

# Stores VSCode versions used for testing VSCode extensions

.vscode-test

# yarn v2

.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*


================================================
FILE: comparisons/empty-ok-all/bun/index.ts
================================================
// Benchmark server: answer every request with an empty 200 OK.
const PORT = 8080;

const server = Bun.serve({
	port: PORT,
	fetch: () => new Response(),
});

console.log(`Listening on http://localhost:${server.port}...`);


================================================
FILE: comparisons/empty-ok-all/bun/package.json
================================================
{
  "name": "empty-ok-all",
  "module": "index.ts",
  "type": "module",
  "devDependencies": {
    "bun-types": "latest"
  },
  "peerDependencies": {
    "typescript": "^5.0.0"
  }
}

================================================
FILE: comparisons/empty-ok-all/bun/tsconfig.json
================================================
{
  "compilerOptions": {
    "lib": ["ESNext"],
    "module": "esnext",
    "target": "esnext",
    "moduleResolution": "bundler",
    "moduleDetection": "force",
    "allowImportingTsExtensions": true,
    "noEmit": true,
    "composite": true,
    "strict": true,
    "downlevelIteration": true,
    "skipLibCheck": true,
    "jsx": "preserve",
    "allowSyntheticDefaultImports": true,
    "forceConsistentCasingInFileNames": true,
    "allowJs": true,
    "types": [
      "bun-types" // add Bun global
    ]
  }
}


================================================
FILE: comparisons/empty-ok-all/go/main.go
================================================
package main

import (
    "net/http"
)

// main registers HelloServer on the default mux and serves on :8080.
func main() {
	mux := http.DefaultServeMux
	mux.HandleFunc("/", HelloServer)
	http.ListenAndServe(":8080", mux)
}

func HelloServer(w http.ResponseWriter, r *http.Request) {}


================================================
FILE: comparisons/empty-ok-all/node/app.js
================================================
'use strict';

const http = require('http');

const HOST = '127.0.0.1';
const PORT = 8080;

// Benchmark server: every request gets an empty 200 OK with a plain-text content type.
const server = http.createServer((request, response) => {
  response.statusCode = 200;
  response.setHeader('Content-Type', 'text/plain');
  response.end();
});

server.listen(PORT, HOST, () => {
  console.log(`Server running at http://${HOST}:${PORT}/`);
});


================================================
FILE: comparisons/empty-ok-all/odin/main.odin
================================================
package empty_ok_all

import "core:fmt"

import http "../../.."

// Benchmark entry point: serve an empty 200 OK for every request on the default port.
main :: proc() {
	s: http.Server

	// Fixed typo in the startup message: "localost" -> "localhost".
	fmt.println("Listening on http://localhost:8080...")

	handler := http.handler(proc(_: ^http.Request, res: ^http.Response) {
		res.status = .OK
		http.respond(res)
	})

	http.listen_and_serve(&s, handler)
}


================================================
FILE: comparisons/empty-ok-all/rust/.gitignore
================================================
target


================================================
FILE: comparisons/empty-ok-all/rust/Cargo.toml
================================================
[package]
name = "rust"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
actix-web = "4.2.1"
serde = { version = "1.0.145", features = ["derive"] }


================================================
FILE: comparisons/empty-ok-all/rust/src/main.rs
================================================
use actix_web::{web, App, HttpServer};

// Benchmark server: bind Actix on 127.0.0.1:8080 and answer "/" with an empty string body.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    let factory = || App::new().service(web::resource("/").to(|| async { "" }));

    HttpServer::new(factory)
        .bind(("127.0.0.1", 8080))?
        .run()
        .await
}


================================================
FILE: cookie.odin
================================================
package http

import "core:io"
import "core:strconv"
import "core:strings"
import "core:time"

// The SameSite attribute of a Set-Cookie header.
Cookie_Same_Site :: enum {
	Unspecified, // The attribute is omitted from the serialized header.
	None,
	Strict,
	Lax,
}

// A cookie as described by a Set-Cookie header.
Cookie :: struct {
	_raw:         string, // The raw header value; parsed fields below are slices into this string.
	name:         string,
	value:        string,
	domain:       Maybe(string),
	expires_gmt:  Maybe(time.Time),
	max_age_secs: Maybe(int),
	path:         Maybe(string),
	http_only:    bool,
	partitioned:  bool,
	secure:       bool,
	same_site:    Cookie_Same_Site,
}

// Builds the Set-Cookie header string representation of the given cookie.
// Newlines in the name, value, domain and path are escaped to prevent header injection.
cookie_write :: proc(w: io.Writer, c: Cookie) -> io.Error {
	// odinfmt:disable
	io.write_string(w, "set-cookie: ") or_return
	write_escaped_newlines(w, c.name)  or_return
	io.write_byte(w, '=')              or_return
	write_escaped_newlines(w, c.value) or_return

	if d, ok := c.domain.(string); ok {
		io.write_string(w, "; Domain=") or_return
		write_escaped_newlines(w, d)    or_return
	}

	if e, ok := c.expires_gmt.(time.Time); ok {
		io.write_string(w, "; Expires=") or_return
		date_write(w, e)                 or_return
	}

	if a, ok := c.max_age_secs.(int); ok {
		io.write_string(w, "; Max-Age=") or_return
		io.write_int(w, a)               or_return
	}

	if p, ok := c.path.(string); ok {
		io.write_string(w, "; Path=") or_return
		write_escaped_newlines(w, p)  or_return
	}

	switch c.same_site {
	case .None:   io.write_string(w, "; SameSite=None")   or_return
	case .Lax:    io.write_string(w, "; SameSite=Lax")    or_return
	case .Strict: io.write_string(w, "; SameSite=Strict") or_return
	case .Unspecified: // no-op.
	}
	// odinfmt:enable

	if c.secure {
		io.write_string(w, "; Secure") or_return
	}

	if c.partitioned {
		io.write_string(w, "; Partitioned") or_return
	}

	if c.http_only {
		io.write_string(w, "; HttpOnly") or_return
	}

	return nil
}

// Builds the Set-Cookie header string representation of the given cookie.
cookie_string :: proc(c: Cookie, allocator := context.allocator) -> string {
	sb: strings.Builder
	strings.builder_init(&sb, 0, 20, allocator)

	cookie_write(strings.to_writer(&sb), c)
	return strings.to_string(sb)
}

// Parses a `Set-Cookie` header value into a Cookie.
//
// TODO: check specific whitespace requirements in RFC.
//
// Allocations are done to check case-insensitive attributes but they are deleted right after.
// So, all the returned strings (inside cookie) are slices into the given value string.
cookie_parse :: proc(value: string, allocator := context.allocator) -> (cookie: Cookie, ok: bool) {
	value := value

	// The header must start with `name=`; an empty name is invalid.
	eq := strings.index_byte(value, '=')
	if eq < 1 { return }

	cookie._raw = value
	cookie.name = value[:eq]
	value = value[eq + 1:]

	semi := strings.index_byte(value, ';')
	switch semi {
	case -1:
		// No attributes follow; the rest is the cookie value.
		cookie.value = value
		ok = true
		return
	case 0:
		// A ';' directly after '=' means an empty value: invalid.
		return
	case:
		cookie.value = value[:semi]
		value = value[semi + 1:]
	}

	// Parses one `key[=value]` attribute into `cookie`; unknown keys fail.
	parse_part :: proc(cookie: ^Cookie, part: string, allocator := context.temp_allocator) -> (ok: bool) {
		eq := strings.index_byte(part, '=')
		switch eq {
		case -1:
			// Valueless flag attributes.
			key := strings.to_lower(part, allocator)
			defer delete(key, allocator)

			switch key {
			case "httponly":
				cookie.http_only = true
			case "partitioned":
				cookie.partitioned = true
			case "secure":
				cookie.secure = true
			case:
				return
			}
		case 0:
			return
		case:
			// `key=value` attributes.
			key := strings.to_lower(part[:eq], allocator)
			defer delete(key, allocator)

			value := part[eq + 1:]

			switch key {
			case "domain":
				cookie.domain = value
			case "expires":
				cookie.expires_gmt = cookie_date_parse(value) or_return
			case "max-age":
				cookie.max_age_secs = strconv.parse_int(value, 10) or_return
			case "path":
				cookie.path = value
			case "samesite":
				// Only the common capitalizations are accepted here.
				switch value {
				case "lax", "Lax", "LAX":
					cookie.same_site = .Lax
				case "none", "None", "NONE":
					cookie.same_site = .None
				case "strict", "Strict", "STRICT":
					cookie.same_site = .Strict
				case:
					return
				}
			case:
				return
			}
		}
		return true
	}

	// Consume the ';'-separated attributes; the final one has no trailing ';'.
	for semi = strings.index_byte(value, ';'); semi != -1; semi = strings.index_byte(value, ';') {
		part := strings.trim_left_space(value[:semi])
		value = value[semi + 1:]
		parse_part(&cookie, part, allocator) or_return
	}

	part := strings.trim_left_space(value)
	if part == "" {
		ok = true
		return
	}

	parse_part(&cookie, part, allocator) or_return
	ok = true
	return
}

/*
Implementation of the algorithm described in RFC 6265 section 5.1.1.

The value is split into tokens; the first token that parses as a time,
day-of-month, month, or year claims that slot (in that priority order).
All four slots must be filled and pass the RFC's range checks to succeed.
*/
cookie_date_parse :: proc(value: string) -> (t: time.Time, ok: bool) {

	// Yields the next date token of `value`, advancing past it.
	// The byte ranges below are the RFC's delimiter character set.
	iter_delim :: proc(value: ^string) -> (token: string, ok: bool) {
		start := -1
		start_loop: for ch, i in transmute([]byte)value^ {
			switch ch {
			case 0x09, 0x20..=0x2F, 0x3B..=0x40, 0x5B..=0x60, 0x7B..=0x7E:
			case:
				start = i
				break start_loop
			}
		}

		if start == -1 {
			return
		}

		token = value[start:]
		length := len(token)
		end_loop: for ch, i in transmute([]byte)token {
			switch ch {
			case 0x09, 0x20..=0x2F, 0x3B..=0x40, 0x5B..=0x60, 0x7B..=0x7E:
				length = i
				break end_loop
			}
		}

		ok = true

		token  = token[:length]
		value^ = value[start+length:]
		return
	}

	// Parses a run of min..max leading ASCII digits; when `trailing_ok` is
	// false the entire string must consist of digits.
	parse_digits :: proc(value: string, min, max: int, trailing_ok: bool) -> (int, bool) {
		count: int
		for ch in transmute([]byte)value {
			if ch <= 0x2f || ch >= 0x3a {
				break
			}
			count += 1
		}

		if count < min || count > max {
			return 0, false
		}

		if !trailing_ok && len(value) != count {
			return 0, false
		}

		return strconv.parse_int(value[:count], 10)
	}

	// Parses an "hh:mm:ss" token (1-2 digits per field, trailing junk
	// allowed only after the seconds).
	parse_time :: proc(token: string) -> (t: Time, ok: bool) {
		hours, match1, tail := strings.partition(token, ":")
		if match1 != ":" { return }
		minutes, match2, seconds := strings.partition(tail,  ":")
		if match2 != ":" { return }

		t.hours   = parse_digits(hours,   1, 2, false) or_return
		t.minutes = parse_digits(minutes, 1, 2, false) or_return
		t.seconds = parse_digits(seconds, 1, 2, true)  or_return

		ok = true
		return
	}

	// Case-insensitively matches the first three letters of the token
	// against the English month abbreviations; returns 0 on no match.
	parse_month :: proc(token: string) -> (month: int) {
		if len(token) < 3 {
			return
		}

		// Lowercase just the first three bytes (ASCII only).
		lower: [3]byte
		for &ch, i in lower {
			#no_bounds_check orig := token[i]
			switch orig {
			case 'A'..='Z':
				ch = orig + 32
			case:
				ch = orig
			}
		}

		switch string(lower[:]) {
		case "jan":
			return 1
		case "feb":
			return 2
		case "mar":
			return 3
		case "apr":
			return 4
		case "may":
			return 5
		case "jun":
			return 6
		case "jul":
			return 7
		case "aug":
			return 8
		case "sep":
			return 9
		case "oct":
			return 10
		case "nov":
			return 11
		case "dec":
			return 12
		case:
			return
		}
	}

	// Wall-clock time-of-day extracted from the value.
	Time :: struct {
		hours, minutes, seconds: int,
	}

	clock: Maybe(Time)
	day_of_month, month, year: Maybe(int)

	value := value
	for token in iter_delim(&value) {
		// Each slot is claimed by the first token that parses for it.
		if _, has_time := clock.?; !has_time {
			if t, tok := parse_time(token); tok {
				clock = t
				continue
			}
		}

		if _, has_day_of_month := day_of_month.?; !has_day_of_month {
			if dom, dok := parse_digits(token, 1, 2, true); dok {
				day_of_month = dom
				continue
			}
		}

		if _, has_month := month.?; !has_month {
			if mon := parse_month(token); mon > 0 {
				month = mon
				continue
			}
		}

		if _, has_year := year.?; !has_year {
			if yr, yrok := parse_digits(token, 2, 4, true); yrok {

				// RFC: two-digit years 70-99 are 19xx, 00-69 are 20xx.
				if yr >= 70 && yr <= 99 {
					yr += 1900
				} else if yr >= 0 && yr <= 69 {
					yr += 2000
				}

				year = yr
				continue
			}
		}
	}

	c := clock.? or_return
	y := year.?  or_return

	// RFC: fail if the year is before 1601.
	if y < 1601 {
		return
	}

	t = time.datetime_to_time(
		y,
		month.?        or_return,
		day_of_month.? or_return,
		c.hours,
		c.minutes,
		c.seconds,
	) or_return

	ok = true
	return
}

/*
Retrieves the cookie with the given `key` out of the requests `Cookie` header.

If the same key is in the header multiple times the last one is returned
(the iterator walks the header right to left, so the first match is the
last occurrence).

`key` is compared exactly (case-sensitive).
*/
request_cookie_get :: proc(r: ^Request, key: string) -> (value: string, ok: bool) {
	cookies := headers_get_unsafe(r.headers, "cookie") or_return

	for k, v in request_cookies_iter(&cookies) {
		if key == k { return v, true }
	}

	return
}

/*
Allocates a map with the given allocator and puts all cookie pairs from the requests `Cookie` header into it.

If the same key is in the header multiple times the last one is returned.

A missing `Cookie` header yields an empty map.
*/
request_cookies :: proc(r: ^Request, allocator := context.temp_allocator) -> (res: map[string]string) {
	res.allocator = allocator

	cookies := headers_get_unsafe(r.headers, "cookie") or_else ""
	for k, v in request_cookies_iter(&cookies) {
		// Don't overwrite, the iterator goes from right to left and we want the last.
		if k in res { continue }

		res[k] = v
	}

	return
}

/*
Iterates the cookies from right to left.

Pairs are expected in the `Cookie` header form `a=1; b=2; c=3`. The given
string is shortened on every call so the proc can be used directly as a
`for k, v in` iterator. Pairs without an '=' are skipped as invalid.
*/
request_cookies_iter :: proc(cookies: ^string) -> (key: string, value: string, ok: bool) {
	end := len(cookies)
	eq  := -1
	for i := end-1; i >= 0; i-=1 {
		b := cookies[i]
		// A pair begins at index 0 or right after a "; " separator.
		start := i == 0
		sep := start || b == ' ' && cookies[i-1] == ';'
		if sep {
			// Next iteration's exclusive end: just before the ';'.
			defer end = i - 1

			// Invalid: no '=' seen within this pair.
			if eq < 0 {
				continue
			}

			// Skip the space of "; " unless we are at the very start.
			off := 0 if start else 1

			key   = cookies[i+off:eq]
			value = cookies[eq+1:end]

			// Shorten the string so the next call continues leftwards.
			cookies^ = cookies[:i-off]

			return key, value, true
		} else if b == '=' {
			eq = i
		}
	}

	return
}


================================================
FILE: docs/all.odin
================================================
/*
This file simply imports any packages we want in the documentation.
*/
package docs

import "../client"
import http ".."
import "../openssl"

_ :: client
_ :: http
_ :: openssl


================================================
FILE: docs/generate.sh
================================================
#!/usr/bin/env bash

# Generates the API documentation site into docs/build and cleans up the
# intermediate documentation archive afterwards.
set -ex

cd docs

# Start from a clean output directory.
rm -rf build
mkdir build

# Writes the docs.odin-doc archive describing all packages.
odin doc . -all-packages -doc-format

cd build

# This is the binary of https://github.com/laytan/pkg.odin-lang.org, built by `odin build . -out:odin-doc`
odin-doc ../docs.odin-doc ../odin-doc.json

# For GitHub pages, a CNAME file with the intended domain is required.
echo "odin-http.laytan.dev" > CNAME

cd ..

rm docs.odin-doc

cd ..


================================================
FILE: docs/odin-doc.json
================================================
{
    "hide_core": true,
    "hide_base": true,
    "collections": {
        "odin-http": {
            "name": "http",
            "source_url": "https://github.com/laytan/odin-http/blob/main",
            "base_url": "/http",
            "root_path": "../..",
            "license": {
                "text": "MIT",
                "url": "https://github.com/laytan/odin-http/tree/main/LICENSE"
            },
            "home": {
                "title": "Odin-HTTP",
                "embed_readme": "../../README.md"
            }
        }
    }
}


================================================
FILE: examples/client/main.odin
================================================
package client_example

import "core:fmt"

import "../../client"

// Runs both example requests: a plain GET, then a JSON POST.
main :: proc() {
	get()
	post()
}

// Basic GET request: fetches a page, prints the status, headers, cookies
// and body, and frees everything it allocated.
get :: proc() {
	res, err := client.get("https://www.google.com/")
	if err != nil {
		fmt.printf("Request failed: %s", err)
		return
	}
	// Free the response resources once we're done with it.
	defer client.response_destroy(&res)

	fmt.printf("Status: %s\n", res.status)
	fmt.printf("Headers: %v\n", res.headers)
	fmt.printf("Cookies: %v\n", res.cookies)
	body, allocation, berr := client.response_body(&res)
	if berr != nil {
		fmt.printf("Error retrieving response body: %s", berr)
		return
	}
	defer client.body_destroy(body, allocation)

	fmt.println(body)
}

// Payload serialized to JSON for the example POST request below.
Post_Body :: struct {
	name:    string,
	message: string,
}

// POST request with JSON: builds a request, attaches a JSON body, sends it
// and prints the response. Replace the URL's placeholder with a real ID.
post :: proc() {
	req: client.Request
	client.request_init(&req, .Post)
	defer client.request_destroy(&req)

	pbody := Post_Body{"Laytan", "Hello, World!"}
	// Serializes pbody as the request's JSON body (also sets content type).
	if err := client.with_json(&req, pbody); err != nil {
		fmt.printf("JSON error: %s", err)
		return
	}

	res, err := client.request(&req, "https://webhook.site/YOUR-ID-HERE")
	if err != nil {
		fmt.printf("Request failed: %s", err)
		return
	}
	defer client.response_destroy(&res)

	fmt.printf("Status: %s\n", res.status)
	fmt.printf("Headers: %v\n", res.headers)
	fmt.printf("Cookies: %v\n", res.cookies)

	body, allocation, berr := client.response_body(&res)
	if berr != nil {
		fmt.printf("Error retrieving response body: %s", berr)
		return
	}
	defer client.body_destroy(body, allocation)

	fmt.println(body)
}


================================================
FILE: examples/tcp_echo/main.odin
================================================
package example_tcp_echo

import "core:fmt"
import "core:net"
import "core:os"

import nbio "../../nbio/poly"

// State shared by the whole echo server: the event loop, the listening
// socket and all live connections.
Echo_Server :: struct {
	io:          nbio.IO,
	sock:        net.TCP_Socket,
	connections: [dynamic]^Echo_Connection,
}

// Per-client state: the client socket and a fixed receive buffer.
Echo_Connection :: struct {
	server:  ^Echo_Server,
	sock:    net.TCP_Socket,
	buf:     [50]byte,
}

// Sets up a TCP listener on localhost:8080 and runs the non-blocking
// event loop until an error occurs.
main :: proc() {
	server: Echo_Server
	defer delete(server.connections)

	nbio.init(&server.io)
	defer nbio.destroy(&server.io)

	sock, err := nbio.open_and_listen_tcp(&server.io, {net.IP4_Loopback, 8080})
	fmt.assertf(err == nil, "Error opening and listening on localhost:8080: %v", err)
	server.sock = sock

	// Queue the first accept; subsequent accepts are queued in the callback.
	nbio.accept(&server.io, sock, &server, echo_on_accept)

	// Start the event loop.
	errno: os.Errno
	for errno == os.ERROR_NONE {
		errno = nbio.tick(&server.io)
	}

	fmt.assertf(errno == os.ERROR_NONE, "Server stopped with error code: %v", errno)
}

// Called when a client connects: re-arms the accept, allocates per-client
// state and queues the first receive.
//
// NOTE(review): connections are allocated with `new` and never individually
// freed — acceptable for an example that runs until process exit.
echo_on_accept :: proc(server: ^Echo_Server, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {
	fmt.assertf(err == nil, "Error accepting a connection: %v", err)

	// Register a new accept for the next client.
	nbio.accept(&server.io, server.sock, server, echo_on_accept)

	c := new(Echo_Connection)
	c.server = server
	c.sock   = client
	append(&server.connections, c)

	nbio.recv(&server.io, client, c.buf[:], c, echo_on_recv)
}

// Called when bytes arrive from a client: echoes them straight back.
// NOTE(review): a client disconnect surfaces as a network error here and
// aborts the whole example via assertf — confirm that is intended.
echo_on_recv :: proc(c: ^Echo_Connection, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {
	fmt.assertf(err == nil, "Error receiving from client: %v", err)

	nbio.send_all(&c.server.io, c.sock, c.buf[:received], c, echo_on_sent)
}

// Called when the echo has been fully sent: queues the next receive.
echo_on_sent :: proc(c: ^Echo_Connection, sent: int, err: net.Network_Error) {
	fmt.assertf(err == nil, "Error sending to client: %v", err)

	// Accept the next message, to then ultimately echo back again.
	nbio.recv(&c.server.io, c.sock, c.buf[:], c, echo_on_recv)
}


================================================
FILE: handlers.odin
================================================
package http

import "core:net"
import "core:strconv"
import "core:sync"
import "core:time"

// Signature for handlers that receive the Handler itself (for user_data/next).
Handler_Proc :: proc(handler: ^Handler, req: ^Request, res: ^Response)
// Signature for plain handlers without access to the Handler.
Handle_Proc :: proc(req: ^Request, res: ^Response)

// A request handler that can be chained via `next` (middleware style);
// `user_data` carries arbitrary per-handler state.
Handler :: struct {
	user_data: rawptr,
	next:      Maybe(^Handler),
	handle:    Handler_Proc,
}

// TODO: something like http.handler_with_body which gets the body before calling the handler.

// Wraps a plain Handle_Proc in a Handler by stashing the proc pointer in
// `user_data` and installing a trampoline that calls it.
handler :: proc(handle: Handle_Proc) -> Handler {
	h: Handler
	h.user_data = rawptr(handle)

	// Trampoline: recovers the user's proc from user_data and invokes it.
	handle := proc(h: ^Handler, req: ^Request, res: ^Response) {
		p := (Handle_Proc)(h.user_data)
		p(req, res)
	}

	h.handle = handle
	return h
}

// Constructs a Handler that invokes `handle` and carries `next` so the
// handler can delegate down the middleware chain.
middleware_proc :: proc(next: Maybe(^Handler), handle: Handler_Proc) -> Handler {
	return Handler{next = next, handle = handle}
}

// Callback invoked when a request is rate-limited, allowing a custom response.
Rate_Limit_On_Limit :: struct {
	user_data: rawptr,
	on_limit:  proc(req: ^Request, res: ^Response, user_data: rawptr),
}

// Convenience method to create a Rate_Limit_On_Limit that writes the given message.
//
// NOTE(review): `message` is captured by pointer and must outlive the handler.
rate_limit_message :: proc(message: ^string) -> Rate_Limit_On_Limit {
	return Rate_Limit_On_Limit{user_data = message, on_limit = proc(_: ^Request, res: ^Response, user_data: rawptr) {
		message := (^string)(user_data)
		body_set(res, message^)
		respond(res)
	}}
}

// Options for the rate_limit middleware.
Rate_Limit_Opts :: struct {
	// Length of a rate-limit window.
	window:   time.Duration,
	// Maximum number of requests per address within one window.
	max:      int,

	// Optional handler to call when a request is being rate-limited, allows you to customize the response.
	on_limit: Maybe(Rate_Limit_On_Limit),
}

// Mutable state of the rate_limit middleware; `hits` and `next_sweep`
// are guarded by `mu`.
Rate_Limit_Data :: struct {
	opts:       ^Rate_Limit_Opts,
	next_sweep: time.Time,
	hits:       map[net.Address]int,
	mu:         sync.Mutex,
}

// Frees the hit map; `sync.guard` holds the mutex for the rest of the scope.
rate_limit_destroy :: proc(data: ^Rate_Limit_Data) {
	sync.guard(&data.mu)
	delete(data.hits)
}

// Basic rate limit based on IP address.
//
// Counts hits per client address in `data`; once a client exceeds
// `opts.max` within `opts.window` it gets a `.Too_Many_Requests` response
// with a `retry-after` header. The hit map is cleared whenever the window
// elapses. Non-limited requests are forwarded to `next`.
rate_limit :: proc(data: ^Rate_Limit_Data, next: ^Handler, opts: ^Rate_Limit_Opts, allocator := context.allocator) -> Handler {
	assert(next != nil)

	h: Handler
	h.next = next

	data.opts = opts
	data.hits = make(map[net.Address]int, 16, allocator)
	data.next_sweep = time.time_add(time.now(), opts.window)
	h.user_data = data

	h.handle = proc(h: ^Handler, req: ^Request, res: ^Response) {
		data := (^Rate_Limit_Data)(h.user_data)

		sync.lock(&data.mu)

		// PERF: if this is not performing, we could run a thread that sweeps on a regular basis.
		if time.since(data.next_sweep) > 0 {
			clear(&data.hits)
			data.next_sweep = time.time_add(time.now(), data.opts.window)
		}

		hits := data.hits[req.client.address]
		data.hits[req.client.address] = hits + 1
		sync.unlock(&data.mu)

		if hits > data.opts.max {
			res.status = .Too_Many_Requests

			// Tell the client how many seconds until the window resets.
			retry_dur := i64(time.diff(time.now(), data.next_sweep) / time.Second)
			buf := make([]byte, 32, context.temp_allocator)
			retry_str := strconv.write_int(buf, retry_dur, 10)
			headers_set_unsafe(&res.headers, "retry-after", retry_str)

			if on, ok := data.opts.on_limit.(Rate_Limit_On_Limit); ok {
				on.on_limit(req, res, on.user_data)
			} else {
				respond(res)
			}
			return
		}

		next := h.next.(^Handler)
		next.handle(next, req, res)
	}

	return h
}


================================================
FILE: headers.odin
================================================
package http

import "core:strings"

// A case-insensitive ASCII map for storing headers.
Headers :: struct {
	// Keys are stored sanitized: lowercased with newlines escaped.
	_kv:      map[string]string,
	// When true, mutating procs panic (used to protect request headers).
	readonly: bool,
}

// Sets the allocator used by the backing map.
headers_init :: proc(h: ^Headers, allocator := context.temp_allocator) {
	h._kv.allocator = allocator
}

// Returns the number of stored headers.
headers_count :: #force_inline proc(h: Headers) -> int {
	return len(h._kv)
}

/*
Sets a header, given key is first sanitized, final (sanitized) key is returned.

Panics if the headers are readonly.
*/
headers_set :: proc(h: ^Headers, k: string, v: string, loc := #caller_location) -> string {
	if h.readonly {
		panic("these headers are readonly, did you accidentally try to set a header on the request?", loc)
	}

	l := sanitize_key(h^, k)
	h._kv[l] = v
	return l
}

/*
Unsafely set header, given key is assumed to be a lowercase string and to be without newlines.
*/
headers_set_unsafe :: #force_inline proc(h: ^Headers, k: string, v: string, loc := #caller_location) {
	assert(!h.readonly, "these headers are readonly, did you accidentally try to set a header on the request?", loc)
	h._kv[k] = v
}

// Gets a header value; the key is sanitized (lowercased) before lookup.
headers_get :: proc(h: Headers, k: string) -> (string, bool) #optional_ok {
	return h._kv[sanitize_key(h, k)]
}

/*
Unsafely get header, given key is assumed to be a lowercase string.
*/
headers_get_unsafe :: #force_inline proc(h: Headers, k: string) -> (string, bool) #optional_ok {
	return h._kv[k]
}

// Returns pointers into the map entry for the (sanitized) key, inserting
// the key if it was absent.
headers_entry :: proc(h: ^Headers, k: string, loc := #caller_location) -> (key_ptr: ^string, value_ptr: ^string, just_inserted: bool) {
	assert(!h.readonly, "these headers are readonly, did you accidentally try to set a header on the request?", loc)
	key_ptr, value_ptr, just_inserted, _ = map_entry(&h._kv, sanitize_key(h^, k))
	return
}

// Like headers_entry, but the key is assumed already lowercase and newline-free.
headers_entry_unsafe :: #force_inline proc(h: ^Headers, k: string, loc := #caller_location) -> (key_ptr: ^string, value_ptr: ^string, just_inserted: bool) {
	assert(!h.readonly, "these headers are readonly, did you accidentally try to set a header on the request?", loc)
	key_ptr, value_ptr, just_inserted, _ = map_entry(&h._kv, k)
	return
}

// Reports whether the (sanitized) key is present.
headers_has :: proc(h: Headers, k: string) -> bool {
	return sanitize_key(h, k) in h._kv
}

/*
Unsafely check for a header, given key is assumed to be a lowercase string.
*/
headers_has_unsafe :: #force_inline proc(h: Headers, k: string) -> bool {
	return k in h._kv
}

// Deletes the header for the (sanitized) key, returning the removed pair.
headers_delete :: proc(h: ^Headers, k: string) -> (deleted_key: string, deleted_value: string) {
	return delete_key(&h._kv, sanitize_key(h^, k))
}

/*
Unsafely delete a header, given key is assumed to be a lowercase string.
*/
headers_delete_unsafe :: #force_inline proc(h: ^Headers, k: string) {
	delete_key(&h._kv, k)
}

/* Common Helpers */

// Sets the `content-type` header from either a Mime_Type or a raw string.
headers_set_content_type :: proc {
	headers_set_content_type_mime,
	headers_set_content_type_string,
}

// Sets `content-type` to the given raw string value.
headers_set_content_type_string :: #force_inline proc(h: ^Headers, ct: string) {
	headers_set_unsafe(h, "content-type", ct)
}

// Sets `content-type` from a known Mime_Type.
headers_set_content_type_mime :: #force_inline proc(h: ^Headers, ct: Mime_Type) {
	headers_set_unsafe(h, "content-type", mime_to_content_type(ct))
}

// Sets `connection: close`, asking for the connection to be closed after the response.
headers_set_close :: #force_inline proc(h: ^Headers) {
	headers_set_unsafe(h, "connection", "close")
}

/*
Escapes any newlines and converts ASCII to lowercase.

Always allocates the result; falls back to the temp allocator when the
headers map has no allocator set yet.
*/
@(private="package")
sanitize_key :: proc(h: Headers, k: string) -> string {
	allocator := h._kv.allocator if h._kv.allocator.procedure != nil else context.temp_allocator

	// general +4 in rare case of newlines, so we might not need to reallocate.
	b := strings.builder_make(0, len(k)+4, allocator)
	for c in k {
		switch c {
		case 'A'..='Z': strings.write_rune(&b, c + 32)
		case '\n':      strings.write_string(&b, "\\n")
		case:           strings.write_rune(&b, c)
		}
	}
	return strings.to_string(b)

	// NOTE: implementation that only allocates if needed, but we use arena's anyway so just allocating
	// some space should be about as fast?
	//
	// b: strings.Builder = ---
	// i: int
	// for c in v {
	// 	if c == '\n' || (c >= 'A' && c <= 'Z') {
	// 		b = strings.builder_make(0, len(v)+4, allocator)
	// 		strings.write_string(&b, v[:i])
	// 		alloc = true
	// 		break
	// 	}
	// 	i+=1
	// }
	//
	// if !alloc {
	// 	return v, false
	// }
	//
	// for c in v[i:] {
	//  switch c {
	//  case 'A'..='Z': strings.write_rune(&b, c + 32)
	//  case '\n':      strings.write_string(&b, "\\n")
	//  case:           strings.write_rune(&b, c)
	//  }
	// }
	//
	// return strings.to_string(b), true
}


================================================
FILE: http.odin
================================================
package http

import "base:runtime"

import "core:io"
import "core:slice"
import "core:strconv"
import "core:strings"
import "core:sync"
import "core:time"

// Errors that can occur while parsing an HTTP request-line.
Requestline_Error :: enum {
	None,
	Method_Not_Implemented,
	Not_Enough_Fields,
	Invalid_Version_Format,
}

// A parsed request-line: method, target and protocol version.
Requestline :: struct {
	method:  Method,
	// Either the raw target string or an already-parsed URL.
	target:  union {
		string,
		URL,
	},
	version: Version,
}

// A request-line begins with a method token, followed by a single space
// (SP), the request-target, another single space (SP), the protocol
// version, and ends with CRLF.
//
// This allocates a clone of the target, because this is intended to be used with a scanner,
// which has a buffer that changes every read.
requestline_parse :: proc(s: string, allocator := context.temp_allocator) -> (line: Requestline, err: Requestline_Error) {
	s := s

	// Method up to the first space.
	next_space := strings.index_byte(s, ' ')
	if next_space == -1 { return line, .Not_Enough_Fields }

	ok: bool
	line.method, ok = method_parse(s[:next_space])
	if !ok { return line, .Method_Not_Implemented }
	s = s[next_space + 1:]

	// Target up to the second space; cloned, see the note above.
	next_space = strings.index_byte(s, ' ')
	if next_space == -1 { return line, .Not_Enough_Fields }

	line.target = strings.clone(s[:next_space], allocator)
	s = s[len(line.target.(string)) + 1:]

	// The remainder must be the version, e.g. "HTTP/1.1".
	line.version, ok = version_parse(s)
	if !ok { return line, .Invalid_Version_Format }

	return
}

// Writes a request-line (`<METHOD> <TARGET> <VERSION>\r\n`) to `w`.
requestline_write :: proc(w: io.Writer, rline: Requestline) -> io.Error {
	// odinfmt:disable
	io.write_string(w, method_string(rline.method)) or_return // <METHOD>
	io.write_byte(w, ' ')                           or_return // <METHOD> <SP>

	switch t in rline.target {
	case string: io.write_string(w, t)              or_return // <METHOD> <SP> <TARGET>
	case URL:    request_path_write(w, t)           or_return // <METHOD> <SP> <TARGET>
	}

	io.write_byte(w, ' ')                           or_return // <METHOD> <SP> <TARGET> <SP>
	version_write(w, rline.version)                 or_return // <METHOD> <SP> <TARGET> <SP> <VERSION>
	io.write_string(w, "\r\n")                      or_return // <METHOD> <SP> <TARGET> <SP> <VERSION> <CRLF>
	// odinfmt:enable

	return nil
}

// An HTTP protocol version, e.g. 1.1 or 2.
Version :: struct {
	major: u8,
	minor: u8,
}

// Parses an HTTP version string according to RFC 7230, section 2.6.
//
// Accepts "HTTP/x" (6 bytes) and "HTTP/x.y" (8 bytes).
// NOTE(review): the digit characters are not validated, so a non-digit
// yields a nonsensical version with ok=true — confirm callers validate.
version_parse :: proc(s: string) -> (version: Version, ok: bool) {
	switch len(s) {
	case 8:
		// "HTTP/x.y": check the dot, take the minor, then fall through
		// to validate the prefix and take the major.
		(s[6] == '.') or_return
		version.minor = u8(int(s[7]) - '0')
		fallthrough
	case 6:
		(s[:5] == "HTTP/") or_return
		version.major = u8(int(s[5]) - '0')
	case:
		return
	}
	ok = true
	return
}

// Writes the HTTP version (e.g. "HTTP/1.1") to `w`.
//
// The ".minor" suffix is omitted when minor is 0, so HTTP/2 is written
// as "HTTP/2" rather than "HTTP/2.0".
version_write :: proc(w: io.Writer, v: Version) -> io.Error {
	io.write_string(w, "HTTP/") or_return
	io.write_rune(w, '0' + rune(v.major)) or_return
	if v.minor > 0 {
		// These errors were previously discarded; propagate them like
		// every other writer in this file does.
		io.write_rune(w, '.') or_return
		io.write_rune(w, '0' + rune(v.minor)) or_return
	}

	return nil
}

// Allocates and returns the string form of the version, e.g. "HTTP/1.1".
// The buffer is 8 bytes, exactly enough for the longest form "HTTP/x.y".
version_string :: proc(v: Version, allocator := context.allocator) -> string {
	buf := make([]byte, 8, allocator)

	// Wrap the fixed buffer in a builder without further allocation.
	b: strings.Builder
	b.buf = slice.into_dynamic(buf)

	version_write(strings.to_writer(&b), v)

	return strings.to_string(b)
}

// The HTTP request methods implemented by this library.
Method :: enum {
	Get,
	Post,
	Delete,
	Patch,
	Put,
	Head,
	Connect,
	Options,
	Trace,
}

// Canonical wire strings, indexed by Method.
_method_strings := [?]string{"GET", "POST", "DELETE", "PATCH", "PUT", "HEAD", "CONNECT", "OPTIONS", "TRACE"}

// Returns the canonical string for the method, or "" when out of range.
method_string :: proc(m: Method) -> string #no_bounds_check {
	if m < .Get || m > .Trace { return "" }
	return _method_strings[m]
}

// Parses a method string (exact, uppercase match) into a Method.
method_parse :: proc(m: string) -> (method: Method, ok: bool) #no_bounds_check {
	// PERF: I assume this is faster than a map with this amount of items.

	for r in Method {
		if _method_strings[r] == m {
			return r, true
		}
	}

	return nil, false
}

// Parses the header and adds it to the headers if valid. The given string is copied.
//
// Rejects lines with a leading space, no colon, or a space before the
// colon; enforces the single-Host and consistent-Content-Length rules;
// repeated headers are combined with ", " per RFC 9110 5.3.
//
// NOTE(review): `tmp_key` comes from sanitize_key, which allocates from the
// headers' map allocator (or temp), while deletes here use the `allocator`
// parameter — confirm callers always pass matching allocators.
header_parse :: proc(headers: ^Headers, line: string, allocator := context.temp_allocator) -> (key: string, ok: bool) {
	// Preceding spaces should not be allowed.
	(len(line) > 0 && line[0] != ' ') or_return

	colon := strings.index_byte(line, ':')
	(colon > 0) or_return

	// There must not be a space before the colon.
	(line[colon - 1] != ' ') or_return

	// TODO/PERF: only actually relevant/needed if the key is one of these.
	has_host   := headers_has_unsafe(headers^, "host")
	cl, has_cl := headers_get_unsafe(headers^, "content-length")

	value := strings.trim_space(line[colon + 1:])
	tmp_key := sanitize_key(headers^, line[:colon])
	defer if !ok { delete(tmp_key, allocator) }

	// RFC 7230 5.4: Server MUST respond with 400 to any request
	// with multiple "Host" header fields.
	if tmp_key == "host" && has_host {
		return
	}

	// RFC 7230 3.3.3: If a message is received without Transfer-Encoding and with
	// either multiple Content-Length header fields having differing
	// field-values or a single Content-Length header field having an
	// invalid value, then the message framing is invalid and the
	// recipient MUST treat it as an unrecoverable error.
	if tmp_key == "content-length" && has_cl && cl != value {
		return
	}

	// RFC 9110 5.3: A recipient MAY combine multiple field lines within a field section
	// that have the same field name into one field line, without changing
	// the semantics of the message, by appending each subsequent field line
	// value to the initial field line value in order, separated by a comma
	// (",") and optional whitespace (OWS, defined in Section 5.6.3). For
	// consistency, use comma SP.
	key_ptr, value_ptr, just_inserted := headers_entry_unsafe(headers, tmp_key)
	if just_inserted {
		value = strings.clone(value, allocator)
	} else {
		value = strings.concatenate({value_ptr^, ", ", value}, allocator)
		delete(tmp_key, allocator)
		delete(value_ptr^, allocator)
	}
	key = key_ptr^
	value_ptr^ = value

	ok = true
	return
}

// Returns whether `key` may appear as a trailer header.
//
// RFC 7230 4.1.2:
// A sender MUST NOT generate a trailer that contains a field necessary
// for message framing (e.g., Transfer-Encoding and Content-Length),
// routing (e.g., Host), request modifiers (e.g., controls and
// conditionals in Section 5 of [RFC7231]), authentication (e.g., see
// [RFC7235] and [RFC6265]), response control data (e.g., see Section
// 7.1 of [RFC7231]), or determining how to process the payload (e.g.,
// Content-Encoding, Content-Type, Content-Range, and Trailer).
header_allowed_trailer :: proc(key: string) -> bool {
	switch key {
	case "transfer-encoding", "content-length",                                               // message framing
	     "host",                                                                              // routing
	     "if-match", "if-none-match", "if-modified-since",                                    // request modifiers
	     "if-unmodified-since", "if-range",
	     "www-authenticate", "authorization",                                                 // authentication
	     "proxy-authenticate", "proxy-authorization", "cookie", "set-cookie",
	     "age", "cache-control", "expires", "date",                                           // control data
	     "location", "retry-after", "vary", "warning",
	     "content-encoding", "content-type", "content-range", "trailer":                      // payload processing
		return false
	case:
		return true
	}
}

// Exact length of a formatted HTTP date.
@(private)
DATE_LENGTH :: len("Fri, 05 Feb 2023 09:01:10 GMT")

// Formats a time in the HTTP header format (no timezone conversion is done, GMT expected):
// `<day-name>, <day> <month> <year> <hour>:<minute>:<second> GMT`
date_write :: proc(w: io.Writer, t: time.Time) -> io.Error {
	year, month, day := time.date(t)
	hour, minute, second := time.clock_from_time(t)
	wday := time.weekday(t)

	// odinfmt:disable
	io.write_string(w, DAYS[wday])    or_return // 'Fri, '
	write_padded_int(w, day)          or_return // 'Fri, 05'
	io.write_string(w, MONTHS[month]) or_return // 'Fri, 05 Feb '
	io.write_int(w, year)             or_return // 'Fri, 05 Feb 2023'
	io.write_byte(w, ' ')             or_return // 'Fri, 05 Feb 2023 '
	write_padded_int(w, hour)         or_return // 'Fri, 05 Feb 2023 09'
	io.write_byte(w, ':')             or_return // 'Fri, 05 Feb 2023 09:'
	write_padded_int(w, minute)       or_return // 'Fri, 05 Feb 2023 09:01'
	io.write_byte(w, ':')             or_return // 'Fri, 05 Feb 2023 09:01:'
	write_padded_int(w, second)       or_return // 'Fri, 05 Feb 2023 09:01:10'
	io.write_string(w, " GMT")        or_return // 'Fri, 05 Feb 2023 09:01:10 GMT'
	// odinfmt:enable

	return nil
}

// Formats a time in the HTTP header format (no timezone conversion is done, GMT expected):
// `<day-name>, <day> <month> <year> <hour>:<minute>:<second> GMT`
//
// Allocates exactly DATE_LENGTH bytes, so the builder never reallocates.
date_string :: proc(t: time.Time, allocator := context.allocator) -> string {
	b: strings.Builder

	buf := make([]byte, DATE_LENGTH, allocator)
	b.buf = slice.into_dynamic(buf)

	date_write(strings.to_writer(&b), t)

	return strings.to_string(b)
}

// Parses an HTTP date of the exact shape produced by date_write,
// e.g. "Fri, 05 Feb 2023 09:01:10 GMT". Any other length fails.
date_parse :: proc(value: string) -> (t: time.Time, ok: bool) #no_bounds_check {
	if len(value) != DATE_LENGTH { return }

	// Remove 'Fri, '
	value := value
	value = value[5:]

	// Parse '05'
	day := strconv.parse_i64_of_base(value[:2], 10) or_return
	value = value[2:]

	// Parse ' Feb ' or '-Feb-' (latter is a deprecated format but should still be parsed).
	month_index := -1
	month_str := value[1:4]
	value = value[5:]
	for month, i in MONTHS[1:] {
		// Compare just the three letters, ignoring the separators.
		if month_str == month[1:4] {
			month_index = i
			break
		}
	}
	// Convert the 0-based index over MONTHS[1:] to a 1-based month.
	month_index += 1
	if month_index <= 0 { return }

	year := strconv.parse_i64_of_base(value[:4], 10) or_return
	value = value[4:]

	hour := strconv.parse_i64_of_base(value[1:3], 10) or_return
	value = value[4:]

	minute := strconv.parse_i64_of_base(value[:2], 10) or_return
	value = value[3:]

	seconds := strconv.parse_i64_of_base(value[:2], 10) or_return
	value = value[3:]

	// Should have only 'GMT' left now.
	if value != "GMT" { return }

	t = time.datetime_to_time(int(year), int(month_index), int(day), int(hour), int(minute), int(seconds)) or_return
	ok = true
	return
}

// Writes the request path of a URL (path plus optional "?query") to `w`;
// an empty path is written as "/".
request_path_write :: proc(w: io.Writer, target: URL) -> io.Error {
	// TODO: maybe net.percent_encode.

	if target.path == "" {
		io.write_byte(w, '/') or_return
	} else {
		io.write_string(w, target.path) or_return
	}

	if len(target.query) > 0 {
		io.write_byte(w, '?') or_return
		io.write_string(w, target.query) or_return
	}

	return nil
}

// Allocates and returns the request path (path + optional "?query") as a string.
request_path :: proc(target: URL, allocator := context.allocator) -> (rq_path: string) {
	res := strings.builder_make(0, len(target.path), allocator)
	request_path_write(strings.to_writer(&res), target)
	return strings.to_string(res)
}

// Returns the slice of a dynamic array's backing memory between its length
// and capacity: allocated but not yet written.
_dynamic_unwritten :: proc(d: [dynamic]$E) -> []E  {
	return (cast([^]E)raw_data(d))[len(d):cap(d)]
}

// Grows the dynamic array's length by `len` without writing, for use after
// data was placed directly into the unwritten region.
_dynamic_add_len :: proc(d: ^[dynamic]$E, len: int) {
	(transmute(^runtime.Raw_Dynamic_Array)d).len += len
}

// Writes `i` to `w`, zero-padding single digits to two characters (5 -> "05").
@(private)
write_padded_int :: proc(w: io.Writer, i: int) -> io.Error {
	if i >= 10 {
		_, err := io.write_int(w, i)
		return err
	}

	io.write_string(w, PADDED_NUMS[i]) or_return
	return nil
}

// Writes `v` to `w`, replacing every '\n' with the two characters `\` `n`
// so a value cannot inject additional header lines.
@(private)
write_escaped_newlines :: proc(w: io.Writer, v: string) -> io.Error {
	for r in v {
		switch r {
		case '\n': io.write_string(w, "\\n") or_return
		case:      io.write_rune(w, r)       or_return
		}
	}
	return nil
}

// Pre-padded two-character strings for 0-9, used by write_padded_int.
@(private)
PADDED_NUMS := [10]string{"00", "01", "02", "03", "04", "05", "06", "07", "08", "09"}

// Day names pre-formatted with the trailing ", " of the HTTP date format.
@(private)
DAYS := [7]string{"Sun, ", "Mon, ", "Tue, ", "Wed, ", "Thu, ", "Fri, ", "Sat, "}

// Month names pre-formatted with surrounding spaces; indexed 1 through 12.
@(private)
MONTHS := [13]string {
	" ", // Jan is 1, so 0 should never be accessed.
	" Jan ",
	" Feb ",
	" Mar ",
	" Apr ",
	" May ",
	" Jun ",
	" Jul ",
	" Aug ",
	" Sep ",
	" Oct ",
	" Nov ",
	" Dec ",
}

// Wrapper marking a value that must only be accessed atomically.
@(private)
Atomic :: struct($T: typeid) {
	raw: T,
}

// Atomically stores `val` into `a`.
@(private)
atomic_store :: #force_inline proc(a: ^Atomic($T), val: T) {
	sync.atomic_store(&a.raw, val)
}

// Atomically loads and returns the value held by `a`.
@(private)
atomic_load :: #force_inline proc(a: ^Atomic($T)) -> T {
	return sync.atomic_load(&a.raw)
}

import "core:testing"

// Verifies _dynamic_unwritten / _dynamic_add_len against fresh, partially
// consumed, and empty dynamic arrays.
@(test)
test_dynamic_unwritten :: proc(t: ^testing.T) {
	{
		// len 4, cap 8: the unwritten region is the remaining 4 slots.
		d  := make([dynamic]int, 4, 8)
		du := _dynamic_unwritten(d)

		testing.expect(t, len(du) == 4)
	}

	{
		// A full slice-backed array: extend len by 3, leaving 2 unwritten.
		d := slice.into_dynamic([]int{1, 2, 3, 4, 5})
		_dynamic_add_len(&d, 3)
		du := _dynamic_unwritten(d)

		testing.expect(t, len(d)  == 3)
		testing.expect(t, len(du) == 2)
		testing.expect(t, du[0] == 4)
		testing.expect(t, du[1] == 5)
	}

	{
		// Empty backing slice: nothing is unwritten.
		d := slice.into_dynamic([]int{})
		du := _dynamic_unwritten(d)

		testing.expect(t, len(du) == 0)
	}
}


================================================
FILE: mimes.odin
================================================
package http

import "core:path/filepath"

// Mime types this package knows how to map to/from; `.Plain` is the
// fallback for anything unrecognized (see `mime_from_extension`).
Mime_Type :: enum {
	Plain,

	Css,
	Csv,
	Gif,
	Html,
	Ico,
	Jpeg,
	Js,
	Json,
	Png,
	Svg,
	Url_Encoded,
	Xml,
	Zip,
	Wasm,
}

// Returns the mime type matching the extension of the given path or file
// name, defaulting to `.Plain` when the extension is unknown.
mime_from_extension :: proc(s: string) -> Mime_Type {
	//odinfmt:disable
	switch filepath.ext(s) {
	case ".html": return .Html
	case ".htm":  return .Html // Legacy spelling of .html.
	case ".js":   return .Js
	case ".css":  return .Css
	case ".csv":  return .Csv
	case ".xml":  return .Xml
	case ".zip":  return .Zip
	case ".json": return .Json
	case ".ico":  return .Ico
	case ".gif":  return .Gif
	case ".jpeg": return .Jpeg
	case ".jpg":  return .Jpeg // .jpg is the most common JPEG extension.
	case ".png":  return .Png
	case ".svg":  return .Svg
	case ".wasm": return .Wasm
	case:         return .Plain
	}
	//odinfmt:enable
}

// Lookup table from `Mime_Type` to the `Content-Type` header value to send.
@(private="file")
_mime_to_content_type := [Mime_Type]string{
	.Plain       = "text/plain",

	.Css         = "text/css",
	.Csv         = "text/csv",
	.Gif         = "image/gif",
	.Html        = "text/html",
	// Fixed: "application/vnd.microsoft.ico" is not a registered media type;
	// the IANA-registered type for .ico favicons is image/vnd.microsoft.icon.
	.Ico         = "image/vnd.microsoft.icon",
	.Jpeg        = "image/jpeg",
	.Js          = "application/javascript",
	.Json        = "application/json",
	.Png         = "image/png",
	.Svg         = "image/svg+xml",
	.Url_Encoded = "application/x-www-form-urlencoded",
	.Xml         = "text/xml",
	.Zip         = "application/zip",
	.Wasm        = "application/wasm",
}

// Returns the `Content-Type` header value for the given mime type.
mime_to_content_type :: proc(m: Mime_Type) -> string {
	return _mime_to_content_type[m]
}


================================================
FILE: mod.pkg
================================================
{
	"version": "0.0.4-beta",
	"description": "An HTTP/1.1 client/server implementation",
	"url": "https://github.com/laytan/odin-http",
	"readme": "README.md",
	"license": "MIT",
	"keywords": ["HTTP"]
}


================================================
FILE: odinfmt.json
================================================
{
	"character_width": 120,
	"tabs": true,
	"tabs_width": 4
}


================================================
FILE: old_nbio/README.md
================================================
# package nbio

Package nbio implements a non-blocking IO abstraction layer over several platform specific APIs.

This package implements an event loop based abstraction.

*TODO:*

- Benchmarking
- Some UDP implementations

*APIs:*

- Windows: [IOCP (IO Completion Ports)](https://en.wikipedia.org/wiki/Input/output_completion_port)
- Linux:   [io_uring](https://en.wikipedia.org/wiki/Io_uring)
- Darwin:  [KQueue](https://en.wikipedia.org/wiki/Kqueue)

*How to read the code:*

The file nbio.odin can be read a little bit like a header file,
it has all the procedures heavily explained and commented and dispatches them to platform specific code.

You can also have a look at the tests for more general usages, the example below or the generated docs even further below.

```odin
/*
This example shows a simple TCP server that echoes back anything it receives.

Better error handling and closing/freeing connections are left for the reader.
*/
package main

import "core:fmt"
import "core:net"
import "core:os"

import nbio "nbio/poly"

Echo_Server :: struct {
	io:          nbio.IO,
	sock:        net.TCP_Socket,
	connections: [dynamic]^Echo_Connection,
}

Echo_Connection :: struct {
	server:  ^Echo_Server,
	sock:    net.TCP_Socket,
	buf:     [50]byte,
}

main :: proc() {
	server: Echo_Server
	defer delete(server.connections)

	nbio.init(&server.io)
	defer nbio.destroy(&server.io)

	sock, err := nbio.open_and_listen_tcp(&server.io, {net.IP4_Loopback, 8080})
	fmt.assertf(err == nil, "Error opening and listening on localhost:8080: %v", err)
	server.sock = sock

	nbio.accept(&server.io, sock, &server, echo_on_accept)

	// Start the event loop.
	errno: os.Errno
	for errno == os.ERROR_NONE {
		errno = nbio.tick(&server.io)
	}

	fmt.assertf(errno == os.ERROR_NONE, "Server stopped with error code: %v", errno)
}

echo_on_accept :: proc(server: ^Echo_Server, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {
	fmt.assertf(err == nil, "Error accepting a connection: %v", err)

	// Register a new accept for the next client.
	nbio.accept(&server.io, server.sock, server, echo_on_accept)

	c := new(Echo_Connection)
	c.server = server
	c.sock   = client
	append(&server.connections, c)

	nbio.recv(&server.io, client, c.buf[:], c, echo_on_recv)
}

echo_on_recv :: proc(c: ^Echo_Connection, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {
	fmt.assertf(err == nil, "Error receiving from client: %v", err)

	nbio.send_all(&c.server.io, c.sock, c.buf[:received], c, echo_on_sent)
}

echo_on_sent :: proc(c: ^Echo_Connection, sent: int, err: net.Network_Error) {
	fmt.assertf(err == nil, "Error sending to client: %v", err)

	// Accept the next message, to then ultimately echo back again.
	nbio.recv(&c.server.io, c.sock, c.buf[:], c, echo_on_recv)
}
```


================================================
FILE: old_nbio/_io_uring/os.odin
================================================
#+build linux
package io_uring

import "core:math"
import "core:os"
import "core:sync"
import "core:sys/linux"
import "core:sys/unix"

// Idle time (ms) before a kernel SQ polling thread sleeps (SQPOLL mode).
DEFAULT_THREAD_IDLE_MS :: 1000
// Default submission queue size; must be a power of two.
DEFAULT_ENTRIES :: 32
// Maximum ring size accepted by `check_entries`.
MAX_ENTRIES :: 4096

// Errors this io_uring wrapper can produce, mapped from the negative errno
// values returned by the io_uring syscalls (see `io_uring_init` / `enter`).
IO_Uring_Error :: enum {
	None,
	Entries_Zero,
	Entries_Not_Power_Of_Two,
	Entries_Too_Large,
	Params_Outside_Accessible_Address_Space,
	Arguments_Invalid,
	Process_Fd_Quota_Exceeded,
	System_Fd_Quota_Exceeded,
	System_Resources,
	Permission_Denied,
	System_Outdated,
	Submission_Queue_Full,
	File_Descriptor_Invalid,
	Completion_Queue_Overcommitted,
	Submission_Queue_Entry_Invalid,
	Buffer_Invalid,
	Ring_Shutting_Down,
	Opcode_Not_Supported,
	Signal_Interrupt,
	Unexpected,
}

// Handle to one kernel io_uring instance: the ring file descriptor plus the
// mmapped submission/completion queues and the setup flags/features.
IO_Uring :: struct {
	fd:       os.Handle,
	sq:       Submission_Queue,
	cq:       Completion_Queue,
	flags:    u32,
	features: u32,
}

// Set up an IO_Uring with default parameters, `entries` must be a power of 2 between 1 and 4096.
// Note: overwrites `params.flags` and `params.sq_thread_idle` before passing
// the params on to `io_uring_init`.
io_uring_make :: proc(
	params: ^io_uring_params,
	entries: u32 = DEFAULT_ENTRIES,
	flags: u32 = 0,
) -> (
	ring: IO_Uring,
	err: IO_Uring_Error,
) {
	params.flags = flags
	params.sq_thread_idle = DEFAULT_THREAD_IDLE_MS
	err = io_uring_init(&ring, entries, params)
	return
}

// Initialize and setup a io_uring with more control than io_uring_make.
// Calls `io_uring_setup(2)`, maps the negative errno return onto
// `IO_Uring_Error`, then mmaps the submission and completion queues.
io_uring_init :: proc(ring: ^IO_Uring, entries: u32, params: ^io_uring_params) -> (err: IO_Uring_Error) {
	check_entries(entries) or_return

	res := sys_io_uring_setup(entries, params)
	if res < 0 {
		#partial switch os.Platform_Error(-res) {
		case .EFAULT:
			return .Params_Outside_Accessible_Address_Space
		// The resv array contains non-zero data, p.flags contains an unsupported flag,
		// entries out of bounds, IORING_SETUP_SQ_AFF was specified without IORING_SETUP_SQPOLL,
		// or IORING_SETUP_CQSIZE was specified but linux.io_uring_params.cq_entries was invalid:
		case .EINVAL:
			return .Arguments_Invalid
		case .EMFILE:
			return .Process_Fd_Quota_Exceeded
		case .ENFILE:
			return .System_Fd_Quota_Exceeded
		case .ENOMEM:
			return .System_Resources
		// IORING_SETUP_SQPOLL was specified but effective user ID lacks sufficient privileges,
		// or a container seccomp policy prohibits io_uring syscalls:
		case .EPERM:
			return .Permission_Denied
		case .ENOSYS:
			return .System_Outdated
		case:
			return .Unexpected
		}
	}

	fd := os.Handle(res)

	// This wrapper requires the single-mmap feature and does not support
	// big (128 byte) SQEs or (32 byte) CQEs.
	assert((params.features & IORING_FEAT_SINGLE_MMAP) != 0)
	assert((params.flags & IORING_SETUP_CQE32) == 0)
	assert((params.flags & IORING_SETUP_SQE128) == 0)

	sq, ok := submission_queue_make(fd, params)
	if !ok { return .System_Resources }

	ring.fd = fd
	ring.sq = sq
	ring.cq = completion_queue_make(fd, params, &sq)
	ring.flags = params.flags
	ring.features = params.features

	return
}

// Checks if the entries conform to the kernel rules: non-zero, a power of
// two, and at most `MAX_ENTRIES` — matching the "between 1 and 4096" range
// documented on `io_uring_make`.
@(private)
check_entries :: proc(entries: u32) -> (err: IO_Uring_Error) {
	switch {
	// Fixed off-by-one: was `>=`, which rejected the documented (and
	// kernel-accepted) maximum of exactly MAX_ENTRIES (4096).
	case entries > MAX_ENTRIES:
		err = .Entries_Too_Large
	case entries == 0:
		err = .Entries_Zero
	case !math.is_power_of_two(int(entries)):
		err = .Entries_Not_Power_Of_Two
	case:
		err = .None
	}
	return
}

// Unmaps the queues and closes the ring fd, leaving `ring.fd` at -1 so a
// double destroy trips the assert.
io_uring_destroy :: proc(ring: ^IO_Uring) {
	assert(ring.fd >= 0)
	submission_queue_destroy(&ring.sq)
	os.close(ring.fd)
	ring.fd = -1
}

// Returns a pointer to a vacant submission queue entry, or an error if the submission queue is full.
// Only advances the local `sqe_tail`; the kernel-visible tail is published
// later by `flush_sq` (via `submit`).
get_sqe :: proc(ring: ^IO_Uring) -> (sqe: ^io_uring_sqe, err: IO_Uring_Error) {
	sq := &ring.sq
	head: u32 = sync.atomic_load_explicit(sq.head, .Acquire)
	next := sq.sqe_tail + 1

	// Unsigned wrap-around arithmetic: `next - head` is how many entries
	// would be in flight after handing this one out.
	if int(next - head) > len(sq.sqes) {
		err = .Submission_Queue_Full
		return
	}

	// Zero the recycled entry so no fields leak from a previous submission.
	sqe = &sq.sqes[sq.sqe_tail & sq.mask]
	sqe^ = {}

	sq.sqe_tail = next
	return
}

// Submits the submission queue entries acquired via get_sqe().
// Returns the number of entries submitted.
// Optionally wait for a number of events by setting wait_nr.
submit :: proc(ring: ^IO_Uring, wait_nr: u32 = 0) -> (n_submitted: u32, err: IO_Uring_Error) {
	// Publish locally-queued SQEs to the shared ring first.
	n_submitted = flush_sq(ring)
	flags: u32 = 0
	// Only syscall into the kernel when needed (no SQPOLL thread, an SQPOLL
	// thread that must be woken, or the caller wants to wait for events).
	if sq_ring_needs_enter(ring, &flags) || wait_nr > 0 {
		if wait_nr > 0 || ring.flags & IORING_SETUP_IOPOLL != 0 {
			flags |= IORING_ENTER_GETEVENTS
		}
		n_submitted, err = enter(ring, n_submitted, wait_nr, flags)
	}
	return
}

// Tells the kernel that submission queue entries were submitted and/or we want to wait for their completion queue entries.
// Returns the number of submission queue entries that were submitted.
// Thin wrapper over `io_uring_enter(2)` that maps its negative errno return
// onto `IO_Uring_Error`.
enter :: proc(
	ring: ^IO_Uring,
	n_to_submit: u32,
	min_complete: u32,
	flags: u32,
) -> (
	n_submitted: u32,
	err: IO_Uring_Error,
) {
	assert(ring.fd >= 0)
	ns := sys_io_uring_enter(u32(ring.fd), n_to_submit, min_complete, flags, nil)
	if ns < 0 {
		#partial switch os.Platform_Error(-ns) {
		case .NONE:
			err = .None
		case .EAGAIN:
			// The kernel was unable to allocate memory or ran out of resources for the request. (try again)
			err = .System_Resources
		case .EBADF:
			// The SQE `fd` is invalid, or `IOSQE_FIXED_FILE` was set but no files were registered
			err = .File_Descriptor_Invalid
		// case os.EBUSY: // TODO: why is this not in os_linux
		// 	// Attempted to overcommit the number of requests it can have pending. Should wait for some completions and try again.
		// 	err = .Completion_Queue_Overcommitted
		case .EINVAL:
			// The SQE is invalid, or valid but the ring was setup with `IORING_SETUP_IOPOLL`
			err = .Submission_Queue_Entry_Invalid
		case .EFAULT:
			// The buffer is outside the process' accessible address space, or `IORING_OP_READ_FIXED`
			// or `IORING_OP_WRITE_FIXED` was specified but no buffers were registered, or the range
			// described by `addr` and `len` is not within the buffer registered at `buf_index`
			err = .Buffer_Invalid
		case .ENXIO:
			err = .Ring_Shutting_Down
		case .EOPNOTSUPP:
			// The kernel believes the `fd` doesn't refer to an `io_uring`, or the opcode isn't supported by this kernel (more likely)
			err = .Opcode_Not_Supported
		case .EINTR:
			// The op was interrupted by a delivery of a signal before it could complete. This can happen while waiting for events with `IORING_ENTER_GETEVENTS`
			err = .Signal_Interrupt
		case:
			err = .Unexpected
		}
		return
	}

	n_submitted = u32(ns)
	return
}

// Sync internal state with kernel ring state on the submission queue side.
// Returns the number of all pending events in the submission queue.
// Rationale is to determine that an enter call is needed.
flush_sq :: proc(ring: ^IO_Uring) -> (n_pending: u32) {
	sq := &ring.sq
	to_submit := sq.sqe_tail - sq.sqe_head
	if to_submit != 0 {
		tail := sq.tail^
		i: u32 = 0
		// Publish each locally queued SQE index into the shared index array.
		for ; i < to_submit; i += 1 {
			sq.array[tail & sq.mask] = sq.sqe_head & sq.mask
			tail += 1
			sq.sqe_head += 1
		}
		// Release-store so the kernel sees the array writes before the new tail.
		sync.atomic_store_explicit(sq.tail, tail, .Release)
	}
	n_pending = sq_ready(ring)
	return
}

// Returns true if we are not using an SQ thread (thus nobody submits but us),
// or if IORING_SQ_NEED_WAKEUP is set and the SQ thread must be explicitly awakened.
// For the latter case, we set the SQ thread wakeup flag.
// Matches the implementation of sq_ring_needs_enter() in liburing.
sq_ring_needs_enter :: proc(ring: ^IO_Uring, flags: ^u32) -> bool {
	assert(flags^ == 0)
	// Without SQPOLL the kernel only learns of submissions via io_uring_enter.
	if ring.flags & IORING_SETUP_SQPOLL == 0 { return true }
	if sync.atomic_load_explicit(ring.sq.flags, .Relaxed) & IORING_SQ_NEED_WAKEUP != 0 {
		flags^ |= IORING_ENTER_SQ_WAKEUP
		return true
	}
	return false
}

// Returns the number of submission queue entries in the submission queue.
sq_ready :: proc(ring: ^IO_Uring) -> u32 {
	// Always use the shared ring state (i.e. head and not sqe_head) to avoid going out of sync,
	// see https://github.com/axboe/liburing/issues/92.
	return ring.sq.sqe_tail - sync.atomic_load_explicit(ring.sq.head, .Acquire)
}

// Returns the number of completion queue entries in the completion queue (yet to consume).
// Only this process advances `cq.head`, so the plain read of it is fine.
cq_ready :: proc(ring: ^IO_Uring) -> (n_ready: u32) {
	return sync.atomic_load_explicit(ring.cq.tail, .Acquire) - ring.cq.head^
}

// Copies as many CQEs as are ready, and that can fit into the destination `cqes` slice.
// If none are available, enters into the kernel to wait for at most `wait_nr` CQEs.
// Returns the number of CQEs copied, advancing the CQ ring.
// Provides all the wait/peek methods found in liburing, but with batching and a single method.
copy_cqes :: proc(ring: ^IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) -> (n_copied: u32, err: IO_Uring_Error) {
	n_copied = copy_cqes_ready(ring, cqes)
	if n_copied > 0 { return }
	// Nothing ready: block for events and/or flush an overflown CQ ring.
	if wait_nr > 0 || cq_ring_needs_flush(ring) {
		_ = enter(ring, 0, wait_nr, IORING_ENTER_GETEVENTS) or_return
		n_copied = copy_cqes_ready(ring, cqes)
	}
	return
}

// Copies the CQEs that are already ready (no syscall) into `cqes`,
// then advances the CQ head past the copied entries.
copy_cqes_ready :: proc(ring: ^IO_Uring, cqes: []io_uring_cqe) -> (n_copied: u32) {
	n_ready := cq_ready(ring)
	n_copied = min(u32(len(cqes)), n_ready)
	head := ring.cq.head^
	tail := head + n_copied

	i := 0
	for head != tail {
		// Mask the free-running head to index into the power-of-two ring.
		cqes[i] = ring.cq.cqes[head & ring.cq.mask]
		head += 1
		i += 1
	}
	cq_advance(ring, n_copied)
	return
}

// True when the kernel flagged a CQ overflow; an enter is then needed to
// flush the overflown completions into the ring.
cq_ring_needs_flush :: proc(ring: ^IO_Uring) -> bool {
	return sync.atomic_load_explicit(ring.sq.flags, .Relaxed) & IORING_SQ_CQ_OVERFLOW != 0
}

// For advanced use cases only that implement custom completion queue methods.
// If you use copy_cqes() or copy_cqe() you must not call cqe_seen() or cq_advance().
// Must be called exactly once after a zero-copy CQE has been processed by your application.
// Not idempotent, calling more than once will result in other CQEs being lost.
// Matches the implementation of cqe_seen() in liburing.
cqe_seen :: proc(ring: ^IO_Uring) {
	cq_advance(ring, 1)
}

// For advanced use cases only that implement custom completion queue methods.
// Matches the implementation of cq_advance() in liburing.
cq_advance :: proc(ring: ^IO_Uring, count: u32) {
	if count == 0 { return }
	// Release-store so our reads of the consumed CQEs happen-before the
	// kernel reusing those slots.
	sync.atomic_store_explicit(ring.cq.head, ring.cq.head^ + count, .Release)
}

// Queues (but does not submit) an SQE to perform an `fsync(2)`.
// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
// `flags` maps to the fsync_flags field (e.g. IORING_FSYNC_DATASYNC).
fsync :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	fd: os.Handle,
	flags: u32,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode = .FSYNC
	sqe.rw_flags = i32(flags)
	sqe.fd = i32(fd)
	sqe.user_data = user_data
	return
}

// Queues (but does not submit) an SQE to perform a no-op.
// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
// A no-op is more useful than may appear at first glance.
// For example, you could call `drain_previous_sqes()` on the returned SQE, to use the no-op to
// know when the ring is idle before acting on a kill signal.
nop :: proc(ring: ^IO_Uring, user_data: u64) -> (sqe: ^io_uring_sqe, err: IO_Uring_Error) {
	sqe = get_sqe(ring) or_return
	sqe.opcode = .NOP
	sqe.user_data = user_data
	return
}

// Queues (but does not submit) an SQE to perform a `read(2)` of up to
// `len(buf)` bytes from `fd` at `offset`.
read :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	fd: os.Handle,
	buf: []u8,
	offset: u64,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode    = .READ
	sqe.user_data = user_data
	sqe.fd        = i32(fd)
	sqe.off       = offset
	sqe.addr      = u64(uintptr(raw_data(buf)))
	sqe.len       = u32(len(buf))
	return
}

// Queues (but does not submit) an SQE to perform a `write(2)` of `buf`
// to `fd` at `offset`.
write :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	fd: os.Handle,
	buf: []u8,
	offset: u64,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode    = .WRITE
	sqe.user_data = user_data
	sqe.fd        = i32(fd)
	sqe.off       = offset
	sqe.addr      = u64(uintptr(raw_data(buf)))
	sqe.len       = u32(len(buf))
	return
}

// Queues (but does not submit) an SQE to perform an `accept4(2)` on a socket.
// `addr`,`addr_len` optional
accept :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	sockfd: os.Socket,
	addr: ^os.SOCKADDR = nil,
	addr_len: ^os.socklen_t = nil,
	flags: u32 = 0,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode = IORING_OP.ACCEPT
	sqe.fd = i32(sockfd)
	// io_uring has no dedicated fields for these; the sockaddr pointer rides
	// in `addr` and the length pointer in `off`.
	sqe.addr = cast(u64)uintptr(addr)
	sqe.off = cast(u64)uintptr(addr_len)
	sqe.rw_flags = i32(flags)
	sqe.user_data = user_data
	return
}

// Queue (but does not submit) an SQE to perform a `connect(2)` on a socket.
connect :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	sockfd: os.Socket,
	addr: ^os.SOCKADDR,
	addr_len: os.socklen_t,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode    = .CONNECT
	sqe.user_data = user_data
	sqe.fd        = i32(sockfd)
	// io_uring has no dedicated length field for connect; it rides in `off`.
	sqe.addr      = u64(uintptr(addr))
	sqe.off       = u64(addr_len)
	return
}

// Queues (but does not submit) an SQE to perform a `recv(2)` into `buf`
// with the given `recv(2)` flags.
recv :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	sockfd: os.Socket,
	buf: []byte,
	flags: u32,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode = IORING_OP.RECV
	sqe.fd = i32(sockfd)
	sqe.addr = cast(u64)uintptr(raw_data(buf))
	// Consistency fix: direct u32 conversion like `send`, instead of the
	// redundant double cast through uintptr.
	sqe.len = u32(len(buf))
	sqe.rw_flags = i32(flags)
	sqe.user_data = user_data
	return
}

// Queues (but does not submit) an SQE to perform a `send(2)` of `buf`
// with the given `send(2)` flags.
send :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	sockfd: os.Socket,
	buf: []byte,
	flags: u32,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode    = .SEND
	sqe.user_data = user_data
	sqe.fd        = i32(sockfd)
	sqe.addr      = u64(uintptr(raw_data(buf)))
	sqe.len       = u32(len(buf))
	sqe.rw_flags  = i32(flags)
	return
}

// Queues (but does not submit) an SQE to perform an `openat(2)` of `path`
// relative to directory fd `fd`.
openat :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	fd: os.Handle,
	path: cstring,
	mode: u32,
	flags: u32,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode    = .OPENAT
	sqe.user_data = user_data
	sqe.fd        = i32(fd)
	sqe.addr      = u64(transmute(uintptr)path)
	// io_uring reuses the `len` field for the file mode on openat.
	sqe.len       = mode
	sqe.rw_flags  = i32(flags)
	return
}

// Queues (but does not submit) an SQE to perform a `close(2)`.
close :: proc(ring: ^IO_Uring, user_data: u64, fd: os.Handle) -> (sqe: ^io_uring_sqe, err: IO_Uring_Error) {
	// Consistency: use `or_return` like every other SQE-prep proc in this
	// file; `.None` is the zero value so the behavior is identical to the
	// previous manual `if err != .None {return}`.
	sqe = get_sqe(ring) or_return
	sqe.opcode = IORING_OP.CLOSE
	sqe.fd = i32(fd)
	sqe.user_data = user_data
	return
}

// Queues (but does not submit) an SQE to register a timeout operation.
// Returns a pointer to the SQE.
//
// The timeout will complete when either the timeout expires, or after the specified number of
// events complete (if `count` is greater than `0`).
//
// `flags` may be `0` for a relative timeout, or `IORING_TIMEOUT_ABS` for an absolute timeout.
//
// The completion event result will be `-ETIME` if the timeout completed through expiration,
// `0` if the timeout completed after the specified number of events, or `-ECANCELED` if the
// timeout was removed before it expired.
//
// io_uring timeouts use the `CLOCK.MONOTONIC` clock source.
timeout :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	ts: ^linux.Time_Spec,
	count: u32,
	flags: u32,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode = IORING_OP.TIMEOUT
	sqe.fd = -1
	sqe.addr = cast(u64)uintptr(ts)
	sqe.len = 1 // Exactly one timespec is pointed to by `addr`.
	sqe.off = u64(count) // Event count rides in the `off` field.
	sqe.rw_flags = i32(flags)
	sqe.user_data = user_data
	return
}

// Queues (but does not submit) an SQE to remove an existing timeout operation.
// Returns a pointer to the SQE.
//
// The timeout is identified by its `user_data`.
//
// The completion event result will be `0` if the timeout was found and cancelled successfully,
// `-EBUSY` if the timeout was found but expiration was already in progress, or
// `-ENOENT` if the timeout was not found.
timeout_remove :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	timeout_user_data: u64,
	flags: u32,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode    = .TIMEOUT_REMOVE
	sqe.user_data = user_data
	sqe.fd        = -1
	// The user_data of the timeout being cancelled rides in `addr`.
	sqe.addr      = timeout_user_data
	sqe.rw_flags  = i32(flags)
	return
}

// Queues (but does not submit) an SQE to add a link timeout operation.
// Returns a pointer to the SQE.
//
// You need to set linux.IOSQE_IO_LINK to flags of the target operation
// and then call this method right after the target operation.
// See https://lwn.net/Articles/803932/ for detail.
//
// If the dependent request finishes before the linked timeout, the timeout
// is canceled. If the timeout finishes before the dependent request, the
// dependent request will be canceled.
//
// The completion event result of the link_timeout will be
// `-ETIME` if the timeout finishes before the dependent request
// (in this case, the completion event result of the dependent request will
// be `-ECANCELED`), or
// `-EALREADY` if the dependent request finishes before the linked timeout.
link_timeout :: proc(
	ring: ^IO_Uring,
	user_data: u64,
	ts: ^os.Unix_File_Time,
	flags: u32,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode = IORING_OP.LINK_TIMEOUT
	sqe.fd = -1
	sqe.addr = cast(u64)uintptr(ts)
	sqe.len = 1 // Exactly one timespec is pointed to by `addr`.
	sqe.rw_flags = i32(flags)
	sqe.user_data = user_data
	return
}

// Queues (but does not submit) an SQE to poll `fd` for the given events.
// Pass `.ADD_MULTI` in `flags` for a multishot poll.
poll_add :: proc(
	ring:      ^IO_Uring,
	user_data: u64,
	fd:        os.Handle,
	events:    linux.Fd_Poll_Events,
	flags:     IORing_Poll_Flags,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe             = get_sqe(ring) or_return
	sqe.opcode      = IORING_OP.POLL_ADD
	sqe.fd          = i32(fd)
	sqe.poll_events = transmute(u16)events
	// POLL_ADD command flags live in `len` (poll_events occupies the flag space).
	sqe.len         = transmute(u32)flags
	sqe.user_data   = user_data
	return
}

// Queues (but does not submit) an SQE to remove an existing poll operation
// on `fd` for the given events.
poll_remove :: proc(
	ring:      ^IO_Uring,
	user_data: u64,
	fd:        os.Handle,
	events:    linux.Fd_Poll_Events,
) -> (
	sqe: ^io_uring_sqe,
	err: IO_Uring_Error,
) {
	sqe = get_sqe(ring) or_return
	sqe.opcode = IORING_OP.POLL_REMOVE
	sqe.user_data = user_data
	sqe.fd = i32(fd)
	sqe.poll_events = transmute(u16)events
	return
}

// The mmapped submission queue: pointers into the shared ring mapping plus
// the locally tracked head/tail used to batch SQE publication.
Submission_Queue :: struct {
	head:      ^u32,
	tail:      ^u32,
	mask:      u32,
	flags:     ^u32,
	dropped:   ^u32,
	array:     []u32,
	sqes:      []io_uring_sqe,
	mmap:      []u8,
	mmap_sqes: []u8,

	// We use `sqe_head` and `sqe_tail` in the same way as liburing:
	// We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`.
	// We then set `tail` to `sqe_tail` once, only when these events are actually submitted.
	// This allows us to amortize the cost of the @atomicStore to `tail` across multiple SQEs.
	sqe_head:  u32,
	sqe_tail:  u32,
}

// Maps the submission queue of the ring `fd` into this process.
// Two mappings are made: the shared SQ/CQ ring (single-mmap feature) and the
// SQE array. Returns `ok = false` if either mmap fails.
submission_queue_make :: proc(fd: os.Handle, params: ^io_uring_params) -> (sq: Submission_Queue, ok: bool) {
	assert(fd >= 0)
	// Unsupported feature.
	assert((params.features & IORING_FEAT_SINGLE_MMAP) != 0)

	// With IORING_FEAT_SINGLE_MMAP one mapping covers both rings, so the
	// size is the larger of the two.
	sq_size := params.sq_off.array + params.sq_entries * size_of(u32)
	cq_size := params.cq_off.cqes + params.cq_entries * size_of(io_uring_cqe)
	size := max(sq_size, cq_size)

	mmap_result := unix.sys_mmap(
		nil,
		uint(size),
		unix.PROT_READ | unix.PROT_WRITE,
		unix.MAP_SHARED,
		/* | unix.MAP_POPULATE */
		int(fd),
		IORING_OFF_SQ_RING,
	)
	if mmap_result < 0 { return }
	// Unmap the ring mapping again if the second mmap below fails.
	defer if !ok { unix.sys_munmap(rawptr(uintptr(mmap_result)), uint(size)) }

	mmap := transmute([^]u8)uintptr(mmap_result)

	size_sqes := params.sq_entries * size_of(io_uring_sqe)
	mmap_sqes_result := unix.sys_mmap(
		nil,
		uint(size_sqes),
		unix.PROT_READ | unix.PROT_WRITE,
		unix.MAP_SHARED,
		/* | unix.MAP_POPULATE */
		int(fd),
		IORING_OFF_SQES,
	)
	if mmap_sqes_result < 0 { return }

	array := cast([^]u32)&mmap[params.sq_off.array]
	sqes := cast([^]io_uring_sqe)uintptr(mmap_sqes_result)
	mmap_sqes := cast([^]u8)uintptr(mmap_sqes_result)


	sq.head = cast(^u32)&mmap[params.sq_off.head]
	sq.tail = cast(^u32)&mmap[params.sq_off.tail]
	sq.mask = (cast(^u32)&mmap[params.sq_off.ring_mask])^
	sq.flags = cast(^u32)&mmap[params.sq_off.flags]
	sq.dropped = cast(^u32)&mmap[params.sq_off.dropped]
	sq.array = array[:params.sq_entries]
	sq.sqes = sqes[:params.sq_entries]
	sq.mmap = mmap[:size]
	sq.mmap_sqes = mmap_sqes[:size_sqes]

	ok = true
	return
}

// Unmaps both submission queue mappings (the shared ring and the SQE array).
submission_queue_destroy :: proc(sq: ^Submission_Queue) {
	unix.sys_munmap(raw_data(sq.mmap), uint(len(sq.mmap)))
	// Bug fix: the SQE mapping has its own size; previously `len(sq.mmap)`
	// was passed here, unmapping the wrong number of bytes for this region.
	unix.sys_munmap(raw_data(sq.mmap_sqes), uint(len(sq.mmap_sqes)))
}

// The completion queue: pointers into the shared ring mapping (owned by the
// Submission_Queue's `mmap`; nothing here is unmapped separately).
Completion_Queue :: struct {
	head:     ^u32,
	tail:     ^u32,
	mask:     u32,
	overflow: ^u32,
	cqes:     []io_uring_cqe,
}

// Builds the completion queue view over the already-mapped shared ring in
// `sq.mmap` (requires the single-mmap feature; no new mapping is created).
completion_queue_make :: proc(fd: os.Handle, params: ^io_uring_params, sq: ^Submission_Queue) -> Completion_Queue {
	assert(fd >= 0)
	// Unsupported feature.
	assert((params.features & IORING_FEAT_SINGLE_MMAP) != 0)

	mmap := sq.mmap
	cqes := cast([^]io_uring_cqe)&mmap[params.cq_off.cqes]

	return(
		{
			head = cast(^u32)&mmap[params.cq_off.head],
			tail = cast(^u32)&mmap[params.cq_off.tail],
			mask = (cast(^u32)&mmap[params.cq_off.ring_mask])^,
			overflow = cast(^u32)&mmap[params.cq_off.overflow],
			cqes = cqes[:params.cq_entries],
		} \
	)
}


================================================
FILE: old_nbio/_io_uring/sys.odin
================================================
#+build linux
package io_uring

import "base:intrinsics"

//odinfmt:disable
//odinfmt:disable
// Linux syscall numbers for the io_uring family (x86-64).
SYS_io_uring_setup:    uintptr : 425
SYS_io_uring_enter:    uintptr : 426
SYS_io_uring_register: uintptr : 427
//odinfmt:enable

// Number of signals supported by the kernel signal mask ABI.
NSIG :: 65

// Kernel sigset_t layout (1024 bits).
sigset_t :: [1024 / 32]u32

// Mirrors the kernel's `struct io_uring_params` passed to io_uring_setup(2);
// the kernel fills in entries/features and the ring offsets.
io_uring_params :: struct {
	sq_entries:     u32,
	cq_entries:     u32,
	flags:          u32,
	sq_thread_cpu:  u32,
	sq_thread_idle: u32,
	features:       u32,
	wq_fd:          u32,
	resv:           [3]u32,
	sq_off:         io_sqring_offsets,
	cq_off:         io_cqring_offsets,
}
#assert(size_of(io_uring_params) == 120)

// Byte offsets into the SQ ring mapping, as reported by the kernel
// (mirrors `struct io_sqring_offsets`).
io_sqring_offsets :: struct {
	head:         u32,
	tail:         u32,
	ring_mask:    u32,
	ring_entries: u32,
	flags:        u32,
	dropped:      u32,
	array:        u32,
	resv1:        u32,
	user_addr:    u64,
}

// Byte offsets into the CQ ring mapping, as reported by the kernel
// (mirrors `struct io_cqring_offsets`).
io_cqring_offsets :: struct {
	head:         u32,
	tail:         u32,
	ring_mask:    u32,
	ring_entries: u32,
	overflow:     u32,
	cqes:         u32,
	flags:        u32,
	resv1:        u32,
	user_addr:    u64,
}

// Submission queue entry.
// Mirrors the kernel's `struct io_uring_sqe`; the raw unions reproduce the
// kernel's field overlays, so which member is meaningful depends on `opcode`.
io_uring_sqe :: struct {
	opcode:           IORING_OP, // u8
	flags:            u8, /* IOSQE_ flags */
	ioprio:           u16, /* ioprio for the request */
	fd:               i32, /* file descriptor to do IO on */
	using __offset:   struct #raw_union {
		off:     u64, /* offset into file */
		addr2:   u64,
		using _: struct {
			cmd_op: u32,
			__pad1: u32,
		},
	},
	using __iovecs:   struct #raw_union {
		addr:          u64, /* pointer to buffer or iovecs */
		splice_off_in: u64,
	},
	len:              u32, /* buffer size or number of iovecs */
	using __contents: struct #raw_union {
		rw_flags:         i32,
		fsync_flags:      u32,
		poll_events:      u16, /* compatibility */
		poll32_events:    u32, /* word-reversed for BE */
		sync_range_flags: u32,
		msg_flags:        u32,
		timeout_flags:    u32,
		accept_flags:     u32,
		cancel_flags:     u32,
		open_flags:       u32,
		statx_flags:      u32,
		fadvise_advice:   u32,
		splice_flags:     u32,
		rename_flags:     u32,
		unlink_flags:     u32,
		hardlink_flags:   u32,
		xattr_flags:      u32,
		msg_ring_flags:   u32,
		uring_cmd_flags:  u32,
	},
	user_data:        u64, /* data to be passed back at completion time */
	/* pack this to avoid bogus arm OABI complaints */
	using __buffer:   struct #raw_union {
		/* index into fixed buffers, if used */
		buf_index: u16,
		/* for grouped buffer selection */
		buf_group: u16,
	},
	/* personality to use, if used */
	personality:      u16,
	using _:          struct #raw_union {
		splice_fd_in: i32,
		file_index:   u32,
		using _:      struct {
			addr_len: u16,
			__pad3:   [1]u16,
		},
	},
	using __:         struct #raw_union {
		using _: struct {
			addr3:  u64,
			__pad2: [1]u64,
		},
		/*
		 * If the ring is initialized with IORING_SETUP_SQE128, then
		 * this field is used for 80 bytes of arbitrary command data
		 * NOTE: This is currently not supported.
		 */
		// cmd:     [^]u8,
	},
}
#assert(size_of(io_uring_sqe) == 64)

// Completion queue entry.
// Mirrors the kernel's `struct io_uring_cqe`.
io_uring_cqe :: struct {
	user_data: u64, /* sq.data submission passed back */
	res:       i32, /* result code for this event */
	flags:     u32,
	/*
	 * If the ring is initialized with IORING_SETUP_CQE32, then this field
	 * contains 16-bytes of padding, doubling the size of the CQE.
	 * NOTE: This is currently not supported.
	 */
	// big_cqe:   [^]u64,
}
#assert(size_of(io_uring_cqe) == 16)

/*
 * sqe.flags
 */
/* use fixed fileset */
IOSQE_FIXED_FILE: u32 : (1 << 0)
/* issue after inflight IO */
IOSQE_IO_DRAIN: u32 : (1 << 1)
/* links next sqe */
IOSQE_IO_LINK: u32 : (1 << 2)
/* like LINK, but stronger */
IOSQE_IO_HARDLINK: u32 : (1 << 3)
/* always go async */
IOSQE_ASYNC: u32 : (1 << 4)
/* select buffer from sq.buf_group */
IOSQE_BUFFER_SELECT: u32 : (1 << 5)
/* don't post CQE if request succeeded */
IOSQE_CQE_SKIP_SUCCESS: u32 : (1 << 6)

/*
 * io_uring_setup() flags
 *
 * Passed in io_uring_params.flags to configure ring behavior at creation time.
 */
IORING_SETUP_IOPOLL: u32 : (1 << 0) /* io_context is polled */
IORING_SETUP_SQPOLL: u32 : (1 << 1) /* SQ poll thread */
IORING_SETUP_SQ_AFF: u32 : (1 << 2) /* sq_thread_cpu is valid */
IORING_SETUP_CQSIZE: u32 : (1 << 3) /* app defines CQ size */
IORING_SETUP_CLAMP: u32 : (1 << 4) /* clamp SQ/CQ ring sizes */
IORING_SETUP_ATTACH_WQ: u32 : (1 << 5) /* attach to existing wq */
IORING_SETUP_R_DISABLED: u32 : (1 << 6) /* start with ring disabled */
IORING_SETUP_SUBMIT_ALL: u32 : (1 << 7) /* continue submit on error */
// Cooperative task running. When requests complete, they often require
// forcing the submitter to transition to the kernel to complete. If this
// flag is set, work will be done when the task transitions anyway, rather
// than force an inter-processor interrupt reschedule. This avoids interrupting
// a task running in userspace, and saves an IPI.
IORING_SETUP_COOP_TASKRUN: u32 : (1 << 8)
// If COOP_TASKRUN is set, get notified if task work is available for
// running and a kernel transition would be needed to run it. This sets
// IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
IORING_SETUP_TASKRUN_FLAG: u32 : (1 << 9)
IORING_SETUP_SQE128: u32 : (1 << 10) /* SQEs are 128 byte */
IORING_SETUP_CQE32: u32 : (1 << 11) /* CQEs are 32 byte */
// Only one task is allowed to submit requests
IORING_SETUP_SINGLE_ISSUER: u32 : (1 << 12)
// Defer running task work to get events.
// Rather than running bits of task work whenever the task transitions
// try to do it just before it is needed.
IORING_SETUP_DEFER_TASKRUN: u32 : (1 << 13)

/*
 * sqe.uring_cmd_flags
 * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
 *				along with setting sqe.buf_index.
 */
IORING_URING_CMD_FIXED: u32 : (1 << 0)

/*
 * sqe.fsync_flags
 */
IORING_FSYNC_DATASYNC: u32 : (1 << 0)

/*
 * sqe.timeout_flags
 */
IORING_TIMEOUT_ABS: u32 : (1 << 0)
IORING_TIMEOUT_UPDATE: u32 : (1 << 1)
IORING_TIMEOUT_BOOTTIME: u32 : (1 << 2)
IORING_TIMEOUT_REALTIME: u32 : (1 << 3)
IORING_LINK_TIMEOUT_UPDATE: u32 : (1 << 4)
IORING_TIMEOUT_ETIME_SUCCESS: u32 : (1 << 5)
/* convenience masks over the clock-selection and update-style bits above */
IORING_TIMEOUT_CLOCK_MASK: u32 : (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
IORING_TIMEOUT_UPDATE_MASK: u32 : (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)

/*
 * sq_ring.flags
 */
IORING_SQ_NEED_WAKEUP: u32 : (1 << 0) /* needs io_uring_enter wakeup */
IORING_SQ_CQ_OVERFLOW: u32 : (1 << 1) /* CQ ring is overflown */
IORING_SQ_TASKRUN: u32 : (1 << 2) /* task should enter the kernel */

/*
 * sqe.splice_flags
 * extends splice(2) flags
 */
SPLICE_F_FD_IN_FIXED: u32 : (1 << 31) /* the last bit of __u32 */

/*
 * POLL_ADD flags. Note that since sqe.poll_events is the flag space, the command flags for POLL_ADD are stored in sqe.len.
 *
 * IORING_POLL_ADD_MULTI	Multishot poll. Sets IORING_CQE_F_MORE if the poll handler will continue to report CQEs on behalf of the same SQE.
 *
 * IORING_POLL_UPDATE		Update existing poll request, matching sqe.addr as the old user_data field.
 *
 * IORING_POLL_LEVEL		Level triggered poll.
 */
IORING_POLL_ADD_MULTI: u32 : (1 << 0)
IORING_POLL_UPDATE_EVENTS: u32 : (1 << 1)
IORING_POLL_UPDATE_USER_DATA: u32 : (1 << 2)
IORING_POLL_ADD_LEVEL: u32 : (1 << 3)

// Bit positions mirror the IORING_POLL_* constants above
// (bit 0 = ADD_MULTI, bit 1 = UPDATE_EVENTS, ...).
IORing_Poll_Bits :: enum {
	ADD_MULTI,
	UPDATE_EVENTS,
	UPDATE_USER_DATA,
	ADD_LEVEL,
}

// Typed bit_set view over the IORING_POLL_* command flags, backed by a u32.
IORing_Poll_Flags :: bit_set[IORing_Poll_Bits; u32]

/*
  * send/sendmsg and recv/recvmsg flags (sq.ioprio)
  *
  * IORING_RECVSEND_POLL_FIRST	If set, instead of first attempting to send
  *				or receive and arm poll if that yields an
  *				-EAGAIN result, arm poll upfront and skip
  *				the initial transfer attempt.
  *
  * IORING_RECV_MULTISHOT	Multishot recv. Sets IORING_CQE_F_MORE if
  *				the handler will continue to report
  *				CQEs on behalf of the same SQE.
  *
  * IORING_RECVSEND_FIXED_BUF	Use registered buffers, the index is stored in
  *				the buf_index field.
  *
  * IORING_SEND_ZC_REPORT_USAGE
  *				If set, SEND[MSG]_ZC should report
  *				the zerocopy usage in cqe.res
  *				for the IORING_CQE_F_NOTIF cqe.
  *				0 is reported if zerocopy was actually possible.
  *				IORING_NOTIF_USAGE_ZC_COPIED if data was copied
  *				(at least partially).
  */
IORING_RECVSEND_POLL_FIRST: u32 : (1 << 0)
IORING_RECV_MULTISHOT: u32 : (1 << 1)
IORING_RECVSEND_FIXED_BUF: u32 : (1 << 2)
IORING_SEND_ZC_REPORT_USAGE: u32 : (1 << 3)

/*
  * cqe.res for IORING_CQE_F_NOTIF if
  * IORING_SEND_ZC_REPORT_USAGE was requested
  *
  * It should be treated as a flag, all other
  * bits of cqe.res should be treated as reserved!
  */
IORING_NOTIF_USAGE_ZC_COPIED: u32 : (1 << 31)

/*
  * accept flags stored in sq.ioprio
  */
IORING_ACCEPT_MULTISHOT: u32 : (1 << 0)

/*
  * IORING_OP_MSG_RING command types, stored in sq.addr
  *
  * Implicit enum values match the kernel's numbering (DATA = 0, SEND_FD = 1).
  */
IORING_MSG :: enum {
	DATA, /* pass sq.len as 'res' and off as user_data */
	SEND_FD, /* send a registered fd to another ring */
}

/*
  * IORING_OP_MSG_RING flags (sq.msg_ring_flags)
  *
  * IORING_MSG_RING_CQE_SKIP	Don't post a CQE to the target ring. Not
  *				applicable for IORING_MSG_DATA, obviously.
  */
IORING_MSG_RING_CQE_SKIP: u32 : (1 << 0)
/* Pass through the flags from sq.file_index to cqe.flags */
IORING_MSG_RING_FLAGS_PASS: u32 : (1 << 1)

// Submission queue entry opcodes. The enum's implicit sequential values match
// the IORING_OP_* numbering of the Linux io_uring UAPI (NOP = 0, READV = 1, ...),
// so the declaration order must not be changed.
IORING_OP :: enum u8 {
	NOP,
	READV,
	WRITEV,
	FSYNC,
	READ_FIXED,
	WRITE_FIXED,
	POLL_ADD,
	POLL_REMOVE,
	SYNC_FILE_RANGE,
	SENDMSG,
	RECVMSG,
	TIMEOUT,
	TIMEOUT_REMOVE,
	ACCEPT,
	ASYNC_CANCEL,
	LINK_TIMEOUT,
	CONNECT,
	FALLOCATE,
	OPENAT,
	CLOSE,
	FILES_UPDATE,
	STATX,
	READ,
	WRITE,
	FADVISE,
	MADVISE,
	SEND,
	RECV,
	OPENAT2,
	EPOLL_CTL,
	SPLICE,
	PROVIDE_BUFFERS,
	REMOVE_BUFFERS,
	TEE,
	SHUTDOWN,
	RENAMEAT,
	UNLINKAT,
	MKDIRAT,
	SYMLINKAT,
	LINKAT,
	/* this goes last, obviously */
	LAST,
}

/*
 * sys_io_uring_register() opcodes and arguments.
 *
 * Explicit values match the IORING_REGISTER_* numbering of the kernel UAPI.
 */
IORING_REGISTER :: enum u32 {
	REGISTER_BUFFERS = 0,
	UNREGISTER_BUFFERS = 1,
	REGISTER_FILES = 2,
	UNREGISTER_FILES = 3,
	REGISTER_EVENTFD = 4,
	UNREGISTER_EVENTFD = 5,
	REGISTER_FILES_UPDATE = 6,
	REGISTER_EVENTFD_ASYNC = 7,
	REGISTER_PROBE = 8,
	REGISTER_PERSONALITY = 9,
	UNREGISTER_PERSONALITY = 10,
	REGISTER_RESTRICTIONS = 11,
	REGISTER_ENABLE_RINGS = 12,
	/* extended with tagging */
	REGISTER_FILES2 = 13,
	REGISTER_FILES_UPDATE2 = 14,
	REGISTER_BUFFERS2 = 15,
	REGISTER_BUFFERS_UPDATE = 16,
	/* set/clear io-wq thread affinities */
	REGISTER_IOWQ_AFF = 17,
	UNREGISTER_IOWQ_AFF = 18,
	/* set/get max number of io-wq workers */
	REGISTER_IOWQ_MAX_WORKERS = 19,
	/* register/unregister io_uring fd with the ring */
	REGISTER_RING_FDS = 20,
	UNREGISTER_RING_FDS = 21,
	/* register ring based provide buffer group */
	REGISTER_PBUF_RING = 22,
	UNREGISTER_PBUF_RING = 23,
	/* sync cancelation API */
	REGISTER_SYNC_CANCEL = 24,
	/* register a range of fixed file slots for automatic slot allocation */
	REGISTER_FILE_ALLOC_RANGE = 25,
	/* this goes last */
	REGISTER_LAST,
	/* flag added to the opcode to use a registered ring fd */
	REGISTER_USE_REGISTERED_RING = 1 << 31,
}

// io_uring_params.features flags: reported back by the kernel at setup time
// to indicate which optional io_uring features this kernel supports.
IORING_FEAT_SINGLE_MMAP: u32 : (1 << 0)
IORING_FEAT_NODROP: u32 : (1 << 1)
IORING_FEAT_SUBMIT_STABLE: u32 : (1 << 2)
IORING_FEAT_RW_CUR_POS: u32 : (1 << 3)
IORING_FEAT_CUR_PERSONALITY: u32 : (1 << 4)
IORING_FEAT_FAST_POLL: u32 : (1 << 5)
IORING_FEAT_POLL_32BITS: u32 : (1 << 6)
IORING_FEAT_SQPOLL_NONFIXED: u32 : (1 << 7)
IORING_FEAT_EXT_ARG: u32 : (1 << 8)
IORING_FEAT_NATIVE_WORKERS: u32 : (1 << 9)
IORING_FEAT_RSRC_TAGS: u32 : (1 << 10)

/*
 * cqe.flags
 *
 * IORING_CQE_F_BUFFER	If set, the upper 16 bits are the buffer ID
 * IORING_CQE_F_MORE	If set, parent SQE will generate more CQE entries
 * IORING_CQE_F_SOCK_NONEMPTY	If set, more data to read after socket recv
 * IORING_CQE_F_NOTIF	Set for notification CQEs. Can be used to distinct
 * 			them from sends.
 */
IORING_CQE_F_BUFFER: u32 : (1 << 0)
IORING_CQE_F_MORE: u32 : (1 << 1)
IORING_CQE_F_SOCK_NONEMPTY: u32 : (1 << 2)
IORING_CQE_F_NOTIF: u32 : (1 << 3)

IORING_CQE :: enum {
	BUFFER_SHIFT = 16,
}

/*
 * cq_ring->flags
 */
// disable eventfd notifications
IORING_CQ_EVENTFD_DISABLED: u32 : (1 << 0)

/*
 * io_uring_enter(2) flags
 */
IORING_ENTER_GETEVENTS: u32 : (1 << 0)
IORING_ENTER_SQ_WAKEUP: u32 : (1 << 1)
IORING_ENTER_SQ_WAIT: u32 : (1 << 2)
IORING_ENTER_EXT_ARG: u32 : (1 << 3)
IORING_ENTER_REGISTERED_RING: u32 : (1 << 4)

/*
 * Magic offsets for the application to mmap the data it needs
 *
 * NOTE(review): the declared types are inconsistent (uintptr for SQ_RING/SQES,
 * u64 for the rest); consider unifying — verify how each constant is passed to
 * mmap before changing the types.
 */
IORING_OFF_SQ_RING: uintptr : 0
IORING_OFF_CQ_RING: u64 : 0x8000000
IORING_OFF_SQES: uintptr : 0x10000000
IORING_OFF_PBUF_RING: u64 : 0x80000000
IORING_OFF_PBUF_SHIFT :: 16
IORING_OFF_MMAP_MASK: u64 : 0xf8000000

// Raw io_uring_setup(2): creates an io_uring instance for `entries` submission
// queue entries and fills `params` with the ring layout chosen by the kernel.
// Returns the ring fd on success or a negative errno on failure.
sys_io_uring_setup :: proc "contextless" (entries: u32, params: ^io_uring_params) -> int {
	res := intrinsics.syscall(SYS_io_uring_setup, uintptr(entries), uintptr(params))
	return int(res)
}

// Raw io_uring_enter(2): submits `to_submit` queued SQEs and/or waits until at
// least `min_complete` completions are available, depending on `flags`.
// Returns the syscall result (negative errno on failure).
sys_io_uring_enter :: proc "contextless" (
	fd: u32,
	to_submit: u32,
	min_complete: u32,
	flags: u32,
	sig: ^sigset_t,
) -> int {
	// The kernel expects the byte size of the signal set as the sixth argument
	// whenever a sigset is supplied, and 0 otherwise.
	sigset_size := uintptr(0)
	if sig != nil {
		sigset_size = uintptr(NSIG / 8)
	}

	res := intrinsics.syscall(
		SYS_io_uring_enter,
		uintptr(fd),
		uintptr(to_submit),
		uintptr(min_complete),
		uintptr(flags),
		uintptr(sig),
		sigset_size,
	)
	return int(res)
}

// Raw io_uring_register(2): registers or unregisters resources (buffers,
// files, eventfds, ...) with the ring identified by `fd`, as selected by
// `opcode`. Returns the syscall result (negative errno on failure).
sys_io_uring_register :: proc "contextless" (fd: u32, opcode: IORING_REGISTER, arg: rawptr, nr_args: u32) -> int {
	res := intrinsics.syscall(
		SYS_io_uring_register,
		uintptr(fd),
		uintptr(opcode),
		uintptr(arg),
		uintptr(nr_args),
	)
	return int(res)
}


================================================
FILE: old_nbio/doc.odin
================================================
/*
package nbio implements a non blocking IO abstraction layer over several platform specific APIs.

This package implements an event loop based abstraction.

APIs:
- Windows: [[IOCP IO Completion Ports;https://en.wikipedia.org/wiki/Input/output_completion_port]]
- Linux:   [[io_uring;https://en.wikipedia.org/wiki/Io_uring]]
- Darwin:  [[KQueue;https://en.wikipedia.org/wiki/Kqueue]]

How to read the code:

The file nbio.odin can be read a little bit like a header file,
it has all the procedures heavily explained and commented and dispatches them to platform specific code.

You can also have a look at the tests for more general usages.

Example:
	/*
	This example shows a simple TCP server that echos back anything it receives.

	Better error handling and closing/freeing connections are left for the reader.
	*/
	package main

	import "core:fmt"
	import "core:net"
	import "core:os"

	import nbio "nbio/poly"

	Echo_Server :: struct {
		io:          nbio.IO,
		sock:        net.TCP_Socket,
		connections: [dynamic]^Echo_Connection,
	}

	Echo_Connection :: struct {
		server:  ^Echo_Server,
		sock:    net.TCP_Socket,
		buf:     [50]byte,
	}

	main :: proc() {
		server: Echo_Server
		defer delete(server.connections)

		nbio.init(&server.io)
		defer nbio.destroy(&server.io)

		sock, err := nbio.open_and_listen_tcp(&server.io, {net.IP4_Loopback, 8080})
		fmt.assertf(err == nil, "Error opening and listening on localhost:8080: %v", err)
		server.sock = sock

		nbio.accept(&server.io, sock, &server, echo_on_accept)

		// Start the event loop.
		errno: os.Errno
		for errno == os.ERROR_NONE {
			errno = nbio.tick(&server.io)
		}

		fmt.assertf(errno == os.ERROR_NONE, "Server stopped with error code: %v", errno)
	}

	echo_on_accept :: proc(server: ^Echo_Server, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error) {
		fmt.assertf(err == nil, "Error accepting a connection: %v", err)

		// Register a new accept for the next client.
		nbio.accept(&server.io, server.sock, server, echo_on_accept)

		c := new(Echo_Connection)
		c.server = server
		c.sock   = client
		append(&server.connections, c)

		nbio.recv(&server.io, client, c.buf[:], c, echo_on_recv)
	}

	echo_on_recv :: proc(c: ^Echo_Connection, received: int, _: Maybe(net.Endpoint), err: net.Network_Error) {
		fmt.assertf(err == nil, "Error receiving from client: %v", err)

		nbio.send_all(&c.server.io, c.sock, c.buf[:received], c, echo_on_sent)
	}

	echo_on_sent :: proc(c: ^Echo_Connection, sent: int, err: net.Network_Error) {
		fmt.assertf(err == nil, "Error sending to client: %v", err)

		// Accept the next message, to then ultimately echo back again.
		nbio.recv(&c.server.io, c.sock, c.buf[:], c, echo_on_recv)
	}
*/
package nbio


================================================
FILE: old_nbio/nbio.odin
================================================
package nbio

import "core:net"
import "core:os"
import "core:time"

/*
The main IO type that holds the platform dependent implementation state passed around most procedures in this package
*/
IO :: _IO

/*
Initializes the IO type, allocates different things per platform needs

*Allocates Using Provided Allocator*

The same allocator is used again by `destroy`, so it must stay valid for the lifetime of the IO instance.

Inputs:
- io:        The IO struct to initialize
- allocator: (default: context.allocator)

Returns:
- err: An error code when something went wrong with the setup of the platform's IO API, 0 otherwise
*/
init :: proc(io: ^IO, allocator := context.allocator) -> (err: os.Errno) {
	return _init(io, allocator)
}

/*
The place where the magic happens, each time you call this the IO implementation checks its state
and calls any callbacks which are ready. You would typically call this in a loop

Inputs:
- io: The IO instance to tick

Returns:
- err: An error code when something went wrong while retrieving events, 0 otherwise
*/
tick :: proc(io: ^IO) -> os.Errno {
	return _tick(io)
}

/*
Returns the number of in-progress IO operations still waiting to be completed.
*/
num_waiting :: #force_inline proc(io: ^IO) -> int {
	return _num_waiting(io)
}

/*
Deallocates anything that was allocated when calling init()

Inputs:
- io: The IO instance to deallocate

*Deallocates with the allocator that was passed with the init() call*
*/
destroy :: proc(io: ^IO) {
	_destroy(io)
}

/*
The callback for a "next tick" event

Inputs:
- user: A passed through pointer from initiation to its callback
*/
On_Next_Tick :: #type proc(user: rawptr)

/*
Schedules a callback to be called during the next tick of the event loop.

Inputs:
- io:   The IO instance to use
- user: A pointer that will be passed through to the callback, free to use by you and untouched by us

Returns:
- The internal completion for the scheduled callback
*/
next_tick :: proc(io: ^IO, user: rawptr, callback: On_Next_Tick) -> ^Completion {
	return _next_tick(io, user, callback)
}

/*
The callback for non blocking `timeout` calls

Inputs:
- user: A passed through pointer from initiation to its callback
*/
On_Timeout :: #type proc(user: rawptr)

/*
Schedules a callback to be called after the given duration elapses.

The accuracy depends on the time between calls to `tick`.
When you call it in a loop with no blocks or very expensive calculations in other scheduled event callbacks
it is reliable to about a ms of difference (so a timeout of 10ms would almost always be run between 10ms and 11ms).

Inputs:
- io:       The IO instance to use
- dur:      The minimum duration to wait before calling the given callback
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Timeout` for its arguments
*/
timeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) {
	_timeout(io, dur, user, callback)
}

/*
Creates a socket, sets non blocking mode and relates it to the given IO

Inputs:
- io:       The IO instance to initialize the socket on/with
- family:   Should this be an IP4 or IP6 socket
- protocol: The type of socket (TCP or UDP)

Returns:
- socket: The opened socket
- err:    A network error that happened while opening or configuring the socket
*/
open_socket :: proc(
	io: ^IO,
	family: net.Address_Family,
	protocol: net.Socket_Protocol,
) -> (
	socket: net.Any_Socket,
	err: net.Network_Error,
) {
	return _open_socket(io, family, protocol)
}

/*
Creates a non blocking TCP socket tied to the given IO, binds it to the given endpoint and puts it into listening mode

Inputs:
- io: The IO instance to initialize the socket on/with
- ep: Where to bind the socket to

Returns:
- socket: The opened, bound and listening socket
- err:    A network error that happened while opening, binding or listening
*/
open_and_listen_tcp :: proc(io: ^IO, ep: net.Endpoint) -> (socket: net.TCP_Socket, err: net.Network_Error) {
	raw_sock := open_socket(io, net.family_from_endpoint(ep), .TCP) or_return
	socket = raw_sock.(net.TCP_Socket)

	err = net.bind(socket, ep)
	if err == nil {
		err = listen(socket)
	}

	// Don't leak the socket when binding or listening failed.
	if err != nil {
		net.close(socket)
	}
	return
}

/*
Starts listening on the given socket

Inputs:
- socket:  The socket to start listening
- backlog: The amount of events to keep in the backlog when they are not consumed (default: 1000)

Returns:
- err: A network error that happened when starting listening
*/
listen :: proc(socket: net.TCP_Socket, backlog := 1000) -> (err: net.Network_Error) {
	return _listen(socket, backlog)
}

/*
The callback for non blocking `close` requests

Inputs:
- user: A passed through pointer from initiation to its callback
- ok:   Whether the operation succeeded
*/
On_Close :: #type proc(user: rawptr, ok: bool)

// Default no-op callback so `close` can be used fire-and-forget.
@private
empty_on_close :: proc(_: rawptr, _: bool) {}

/*
A union of types that are `close`'able by this package
*/
Closable :: union #no_nil {
	net.TCP_Socket,
	net.UDP_Socket,
	net.Socket,
	os.Handle,
}

/*
Closes the given `Closable` socket or file handle that was originally created by this package.

*Due to platform limitations, you must pass a `Closable` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- fd:       The `Closable` socket or handle (created using/by this package) to close
- user:     An optional pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: An optional callback that is called when the operation completes, see docs for `On_Close` for its arguments
*/
close :: proc(io: ^IO, fd: Closable, user: rawptr = nil, callback: On_Close = empty_on_close) {
	_close(io, fd, user, callback)
}

/*
The callback for non blocking `accept` requests

Inputs:
- user:   A passed through pointer from initiation to its callback
- client: The socket to communicate through with the newly accepted client
- source: The origin of the client
- err:    A network error that occurred during the accept process
*/
On_Accept :: #type proc(user: rawptr, client: net.TCP_Socket, source: net.Endpoint, err: net.Network_Error)

/*
Using the given socket, accepts the next incoming connection, calling the callback when that happens

Note: this accepts a single connection; register a new accept from the callback to keep accepting.

*Due to platform limitations, you must pass a socket that was opened using the `open_socket` and related procedures from this package*

Inputs:
- io:       The IO instance to use
- socket:   A bound and listening socket *that was created using this package*
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Accept` for its arguments
*/
accept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) {
	_accept(io, socket, user, callback)
}

/*
The callback for non blocking `connect` requests

Inputs:
- user:   A passed through pointer from initiation to its callback
- socket: A socket that is connected to the given endpoint in the `connect` call
- err:    A network error that occurred during the connect call
*/
On_Connect :: #type proc(user: rawptr, socket: net.TCP_Socket, err: net.Network_Error)

/*
Connects to the given endpoint, calling the given callback once it has been done

If initiating the connection fails up front, the callback is invoked immediately
with the error and a zero-value socket.

Inputs:
- io:       The IO instance to use
- endpoint: An endpoint to connect a socket to
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Connect` for its arguments
*/
connect :: proc(io: ^IO, endpoint: net.Endpoint, user: rawptr, callback: On_Connect) {
	if _, err := _connect(io, endpoint, user, callback); err != nil {
		// Initiation failed; report the error straight to the caller's callback.
		callback(user, {}, err)
	}
}

/*
The callback for non blocking `recv` requests

Inputs:
- user:       A passed through pointer from initiation to its callback
- received:   The amount of bytes that were read and added to the given buf
- udp_client: If the given socket was a `net.UDP_Socket`, this will be the client that was received from
- err:        A network error if it occurred
*/
On_Recv :: #type proc(user: rawptr, received: int, udp_client: Maybe(net.Endpoint), err: net.Network_Error)

/*
Receives from the given socket, at most `len(buf)` bytes, and calls the given callback

*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- socket:   Either a `net.TCP_Socket` or a `net.UDP_Socket` (that was opened/returned by this package) to receive from
- buf:      The buffer to put received bytes into
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Recv` for its arguments
*/
recv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv) {
	_recv(io, socket, buf, user, callback)
}

/*
Receives from the given socket until the given buf is full or an error occurred, and calls the given callback

*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- socket:   Either a `net.TCP_Socket` or a `net.UDP_Socket` (that was opened/returned by this package) to receive from
- buf:      The buffer to put received bytes into
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Recv` for its arguments
*/
recv_all :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv) {
	_recv(io, socket, buf, user, callback, all = true)
}

/*
The callback for non blocking `send` and `send_all` requests

Inputs:
- user: A passed through pointer from initiation to its callback
- sent: The amount of bytes that were sent over the connection
- err:  A network error if it occurred
*/
On_Sent :: #type proc(user: rawptr, sent: int, err: net.Network_Error)

/*
Sends at most `len(buf)` bytes from the given buffer over the socket connection, and calls the given callback

*Prefer using the `send` proc group*

*Due to platform limitations, you must pass a `net.TCP_Socket` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- socket:   a `net.TCP_Socket` (that was opened/returned by this package) to send to
- buf:      The buffer to send
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments
*/
send_tcp :: proc(io: ^IO, socket: net.TCP_Socket, buf: []byte, user: rawptr, callback: On_Sent) {
	_send(io, socket, buf, user, callback)
}

/*
Sends at most `len(buf)` bytes from the given buffer over the socket connection to the given endpoint, and calls the given callback

*Prefer using the `send` proc group*

*Due to platform limitations, you must pass a `net.UDP_Socket` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- endpoint: The endpoint to send bytes to over the socket
- socket:   a `net.UDP_Socket` (that was opened/returned by this package) to send to
- buf:      The buffer to send
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments
*/
send_udp :: proc(
	io: ^IO,
	endpoint: net.Endpoint,
	socket: net.UDP_Socket,
	buf: []byte,
	user: rawptr,
	callback: On_Sent,
) {
	_send(io, socket, buf, user, callback, endpoint)
}

/*
Sends at most `len(buf)` bytes from the given buffer over the socket connection, and calls the given callback

*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*
*/
send :: proc {
	send_udp,
	send_tcp,
}

/*
Sends the bytes from the given buffer over the socket connection, and calls the given callback

This will keep sending until either an error or the full buffer is sent

*Prefer using the `send` proc group*

*Due to platform limitations, you must pass a `net.TCP_Socket` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- socket:   a `net.TCP_Socket` (that was opened/returned by this package) to send to
- buf:      The buffer to send
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments
*/
send_all_tcp :: proc(io: ^IO, socket: net.TCP_Socket, buf: []byte, user: rawptr, callback: On_Sent) {
	_send(io, socket, buf, user, callback, all = true)
}

/*
Sends the bytes from the given buffer over the socket connection to the given endpoint, and calls the given callback

This will keep sending until either an error or the full buffer is sent

*Prefer using the `send` proc group*

*Due to platform limitations, you must pass a `net.UDP_Socket` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- endpoint: The endpoint to send bytes to over the socket
- socket:   a `net.UDP_Socket` (that was opened/returned by this package) to send to
- buf:      The buffer to send
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Sent` for its arguments
*/
send_all_udp :: proc(
	io: ^IO,
	endpoint: net.Endpoint,
	socket: net.UDP_Socket,
	buf: []byte,
	user: rawptr,
	callback: On_Sent,
) {
	_send(io, socket, buf, user, callback, endpoint, all = true)
}

/*
Sends the bytes from the given buffer over the socket connection, and calls the given callback

This will keep sending until either an error or the full buffer is sent

*Due to platform limitations, you must pass a `net.TCP_Socket` or `net.UDP_Socket` that was opened/returned using/by this package*
*/
send_all :: proc {
	send_all_udp,
	send_all_tcp,
}

/*
Opens a file handle, sets non blocking mode and relates it to the given IO

*The perm argument is only used when on the darwin or linux platforms, when on Windows you can't use the os.S_\* constants because they aren't declared*
*To prevent compilation errors on Windows, you should use a `when` statement around using those constants and just pass 0*

Inputs:
- io:   The IO instance to connect the opened file to
- path: The path of the file to open
- mode: The file mode                                 (default: os.O_RDONLY)
- perm: The permissions to use when creating a file   (default: 0)

Returns:
- handle: The file handle
- err:    The error code when an error occurred, 0 otherwise
*/
open :: proc(io: ^IO, path: string, mode: int = os.O_RDONLY, perm: int = 0) -> (handle: os.Handle, err: os.Errno) {
	return _open(io, path, mode, perm)
}

/*
Where to seek from

Options:
- Set:  sets the offset to the given value
- Curr: adds the given offset to the current offset
- End:  adds the given offset to the end of the file
*/
Whence :: enum {
	Set,
	Curr,
	End,
}

/*
Seeks the given handle according to the given offset and whence, so that subsequent read and writes *USING THIS PACKAGE* will do so at that offset

*Some platforms require this package to handle offsets while others have state in the kernel, for this reason you should assume that seeking only affects this package*

Inputs:
- io:     The IO instance to seek on
- fd:     The file handle to seek
- offset: The offset to seek by, interpreted according to `whence`
- whence: The seek mode/where to seek from (default: Whence.Set)

Returns:
- new_offset: The offset that the file is at when the operation completed
- err:        The error when an error occurred, 0 otherwise
*/
seek :: proc(io: ^IO, fd: os.Handle, offset: int, whence: Whence = .Set) -> (new_offset: int, err: os.Errno) {
	return _seek(io, fd, offset, whence)
}

/*
The callback for non blocking `read` or `read_at` requests

Inputs:
- user: A passed through pointer from initiation to its callback
- read: The amount of bytes that were read and added to the given buf
- err:  An error number if an error occurred, 0 otherwise
*/
On_Read :: #type proc(user: rawptr, read: int, err: os.Errno)

/*
Reads from the given handle, at the handle's internal offset, at most `len(buf)` bytes, increases the file offset, and calls the given callback

*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- fd:       The file handle (created using/by this package) to read from
- buf:      The buffer to put read bytes into
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments
*/
read :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Read) {
	_read(io, fd, nil, buf, user, callback)
}

/*
Reads from the given handle, at the handle's internal offset, until the given buf is full or an error occurred, increases the file offset, and calls the given callback

*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- fd:       The file handle (created using/by this package) to read from
- buf:      The buffer to put read bytes into
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments
*/
read_all :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Read) {
	_read(io, fd, nil, buf, user, callback, all = true)
}

/*
Reads from the given handle, at the given offset, at most `len(buf)` bytes, and calls the given callback

*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- fd:       The file handle (created using/by this package) to read from
- offset:   The offset to begin the read from
- buf:      The buffer to put read bytes into
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments
*/
read_at :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Read) {
	_read(io, fd, offset, buf, user, callback)
}

/*
Reads from the given handle, at the given offset, until the given buf is full or an error occurred, and calls the given callback

*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- fd:       The file handle (created using/by this package) to read from
- offset:   The offset to begin the read from
- buf:      The buffer to put read bytes into
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Read` for its arguments
*/
read_at_all :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Read) {
	_read(io, fd, offset, buf, user, callback, all = true)
}

// Alias for `read_full`, mirroring `os.read_entire_file`.
read_entire_file :: read_full

/*
Reads the entire file (size found by seeking to the end) into a singly allocated buffer that is returned.
The callback is called once the file is read into the returned buf.

*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*

NOTE(review): measuring the size seeks the handle to the end without seeking
back, so the handle's package-internal offset is left at EOF afterwards (the
actual read uses an explicit offset of 0) — confirm callers don't rely on the
offset after this call.

Inputs:
- io:        The IO instance to use
- fd:        The file handle (created using/by this package) to read from
- user:      A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback:  The callback that is called when the operation completes, see docs for `On_Read` for its arguments
- allocator: Used to allocate the returned buffer (default: context.allocator)

Returns:
- buf:      The buffer allocated to the size retrieved by seeking to the end of the file that is filled before calling the callback; nil when seeking failed or the file is empty (the callback is still called, synchronously, in those cases)
*/
read_full :: proc(io: ^IO, fd: os.Handle, user: rawptr, callback: On_Read, allocator := context.allocator) -> []byte {
	size, err := seek(io, fd, 0, .End)
	if err != os.ERROR_NONE {
		callback(user, 0, err)
		return nil
	}

	// Nothing to read; report success with 0 bytes read immediately.
	if size <= 0 {
		callback(user, 0, os.ERROR_NONE)
		return nil
	}

	buf := make([]byte, size, allocator)
	read_at_all(io, fd, 0, buf, user, callback)
	return buf
}

/*
The callback for non blocking `write`, `write_all`, `write_at` and `write_at_all` requests

Inputs:
- user:     A passed through pointer from initiation to its callback
- written:  The amount of bytes that were written to the file
- err:      An error number if an error occurred, 0 otherwise
*/
On_Write :: #type proc(user: rawptr, written: int, err: os.Errno)

/*
Writes to the given handle, at the handle's internal offset, at most `len(buf)` bytes, increases the file offset, and calls the given callback

*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- fd:       The file handle (created using/by this package) to write to
- buf:      The buffer to write to the file
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments
*/
write :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Write) {
	_write(io, fd, nil, buf, user, callback)
}

/*
Writes the given buffer to the given handle, at the handle's internal offset, increases the file offset, and calls the given callback

This keeps writing until either an error or the full buffer being written

*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- fd:       The file handle (created using/by this package) to write to
- buf:      The buffer to write to the file
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments
*/
write_all :: proc(io: ^IO, fd: os.Handle, buf: []byte, user: rawptr, callback: On_Write) {
	_write(io, fd, nil, buf, user, callback, true)
}

/*
Writes to the given handle, at the given offset, at most `len(buf)` bytes, and calls the given callback

*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- fd:       The file handle (created using/by this package) to write to from
- offset:   The offset to begin the write from
- buf:      The buffer to write to the file
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments
*/
write_at :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Write) {
	_write(io, fd, offset, buf, user, callback)
}

/*
Writes the given buffer to the given handle, at the given offset, and calls the given callback

This keeps writing until either an error or the full buffer being written

*Due to platform limitations, you must pass a `os.Handle` that was opened/returned using/by this package*

Inputs:
- io:       The IO instance to use
- fd:       The file handle (created using/by this package) to write to from
- offset:   The offset to begin the write from
- buf:      The buffer to write to the file
- user:     A pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Write` for its arguments
*/
write_at_all :: proc(io: ^IO, fd: os.Handle, offset: int, buf: []byte, user: rawptr, callback: On_Write) {
	_write(io, fd, offset, buf, user, callback, true)
}

// The kind of readiness that can be waited for with `poll`.
Poll_Event :: enum {
	// The subject is ready to be read from.
	Read,
	// The subject is ready to be written to.
	Write,
}

/*
The callback for poll requests

Inputs:
- user:  A passed through pointer from initiation to its callback
- event: The event that is ready to go
*/
On_Poll :: #type proc(user: rawptr, event: Poll_Event)

/*
Polls for the given event on the subject handle

Inputs:
- io:       The IO instance to use
- fd:       The file descriptor to poll
- event:    Whether to call the callback when `fd` is ready to be read from, or be written to
- multi:    Keeps the poll registered after an event fires, calling the callback again for further events; remove it with `poll_remove`
- user:     An optional pointer that will be passed through to the callback, free to use by you and untouched by us
- callback: The callback that is called when the operation completes, see docs for `On_Poll` for its arguments
*/
poll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) {
	_poll(io, fd, event, multi = multi, user = user, callback = callback)
}

/*
Removes the polling for this `subject`+`event` pairing

Only needed when `poll` was called with `multi` set to `true`

Inputs:
- io:       The IO instance to use
- fd:       The file descriptor to remove the poll of
- event:    The event to remove the poll of
*/
poll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) {
	_poll_remove(io, fd, event = event)
}

// Byte budget for user arguments captured by the poly variants:
// room for 5 pointer-sized values.
MAX_USER_ARGUMENTS :: size_of(rawptr) * 5

// One in-flight operation: platform payload, the caller's user pointer and
// (for poly variants) a small inline buffer for the callback + its arguments.
Completion :: struct {
	// Implementation specifics, don't use outside of implementation/os.
	using _:   _Completion,

	// Pointer given by the caller at initiation; handed back to the callback untouched.
	user_data: rawptr,

	// Callback pointer and user args passed in poly variants.
	// One extra pointer-sized slot on top of MAX_USER_ARGUMENTS for the callback itself.
	user_args: [MAX_USER_ARGUMENTS + size_of(rawptr)]byte,
}

// Tagged union of every operation a `Completion` can carry; each platform
// backend declares its own `Op_*` payload structs.
@(private)
Operation :: union #no_nil {
	Op_Accept,
	Op_Close,
	Op_Connect,
	Op_Read,
	Op_Recv,
	Op_Send,
	Op_Write,
	Op_Timeout,
	Op_Next_Tick,
	Op_Poll,
	Op_Poll_Remove,
}


================================================
FILE: old_nbio/nbio_darwin.odin
================================================
package nbio

import "core:container/queue"
import "core:net"
import "core:os"
import "core:time"
import "core:sys/kqueue"
import "core:sys/posix"

// Creates the kqueue and all bookkeeping collections using the given allocator.
_init :: proc(io: ^IO, allocator := context.allocator) -> (err: os.Errno) {
	kq_errno: posix.Errno
	io.kq, kq_errno = kqueue.kqueue()
	if kq_errno != nil {
		return kq_err_to_os_err(kq_errno)
	}

	io.allocator  = allocator
	io.timeouts   = make([dynamic]^Completion, allocator)
	io.io_pending = make([dynamic]^Completion, allocator)

	pool_init(&io.completion_pool, allocator = allocator)
	queue.init(&io.completed, allocator = allocator)

	return
}

// Number of completions handed out by the pool that have not been recycled yet.
_num_waiting :: #force_inline proc(io: ^IO) -> int {
	return io.completion_pool.num_waiting
}

// Tears down everything `_init` created and closes the kqueue.
_destroy :: proc(io: ^IO) {
	context.allocator = io.allocator

	posix.close(io.kq)

	queue.destroy(&io.completed)

	delete(io.io_pending)
	delete(io.timeouts)

	pool_destroy(&io.completion_pool)
}

// One tick = one `flush` of timeouts, pending IO and completed callbacks.
_tick :: proc(io: ^IO) -> os.Errno {
	return flush(io)
}

// Marks the socket as listening with the given backlog.
_listen :: proc(socket: net.TCP_Socket, backlog := 1000) -> net.Network_Error {
	if os.listen(os.Socket(socket), backlog) != nil {
		return net._listen_error()
	}
	return nil
}

// Registers an accept on `socket`; the actual accept is attempted on the next tick.
_accept :: proc(io: ^IO, socket: net.TCP_Socket, user: rawptr, callback: On_Accept) -> ^Completion {
	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Accept{
		sock     = socket,
		callback = callback,
	}

	queue.push_back(&io.completed, c)
	return c
}

// Wraps os.close using the kqueue.
_close :: proc(io: ^IO, fd: Closable, user: rawptr, callback: On_Close) -> ^Completion {
	// Normalize every closable variant down to a plain os.Handle.
	handle: os.Handle
	switch v in fd {
	case net.TCP_Socket: handle = os.Handle(v)
	case net.UDP_Socket: handle = os.Handle(v)
	case net.Socket:     handle = os.Handle(v)
	case os.Handle:      handle = v
	}

	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Close{
		callback = callback,
		handle   = handle,
	}

	queue.push_back(&io.completed, c)
	return c
}

// TODO: maybe call this dial?
// Creates and prepares a TCP socket for `endpoint` and registers the connect;
// the connect itself is driven by `do_connect` on subsequent ticks.
_connect :: proc(io: ^IO, endpoint: net.Endpoint, user: rawptr, callback: On_Connect) -> (^Completion, net.Network_Error) {
	if endpoint.port == 0 {
		return nil, net.Dial_Error.Port_Required
	}

	sock, create_err := net.create_socket(net.family_from_endpoint(endpoint), .TCP)
	if create_err != nil {
		return nil, create_err
	}

	if prep_err := _prepare_socket(sock); prep_err != nil {
		close(io, net.any_socket_to_socket(sock))
		return nil, prep_err
	}

	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Connect{
		callback = callback,
		socket   = sock.(net.TCP_Socket),
		sockaddr = _endpoint_to_sockaddr(endpoint),
	}

	queue.push_back(&io.completed, c)
	return c, nil
}

// Registers a read on `fd`; a nil offset means "use the file's own cursor"
// (stored internally as -1).
_read :: proc(
	io: ^IO,
	fd: os.Handle,
	offset: Maybe(int),
	buf: []byte,
	user: rawptr,
	callback: On_Read,
	all := false,
) -> ^Completion {
	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Read{
		fd       = fd,
		buf      = buf,
		callback = callback,
		offset   = offset.? or_else -1,
		len      = len(buf),
		all      = all,
	}

	queue.push_back(&io.completed, c)
	return c
}

// Registers a receive on `socket`; when `all` is set the operation keeps
// receiving until the buffer is full or an error occurs.
_recv :: proc(io: ^IO, socket: net.Any_Socket, buf: []byte, user: rawptr, callback: On_Recv, all := false) -> ^Completion {
	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Recv{
		socket   = socket,
		buf      = buf,
		callback = callback,
		len      = len(buf),
		all      = all,
	}

	queue.push_back(&io.completed, c)
	return c
}

// Registers a send on `socket`; UDP sockets require an explicit endpoint.
_send :: proc(
	io: ^IO,
	socket: net.Any_Socket,
	buf: []byte,
	user: rawptr,
	callback: On_Sent,
	endpoint: Maybe(net.Endpoint) = nil,
	all := false,
) -> ^Completion {
	// UDP is connectionless: a destination endpoint is mandatory.
	if _, is_udp := socket.(net.UDP_Socket); is_udp {
		assert(endpoint != nil)
	}

	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Send{
		socket   = socket,
		buf      = buf,
		callback = callback,
		endpoint = endpoint,
		len      = len(buf),
		all      = all,
	}

	queue.push_back(&io.completed, c)
	return c
}

// Registers a write on `fd`; a nil offset means "use the file's own cursor"
// (stored internally as -1).
_write :: proc(
	io: ^IO,
	fd: os.Handle,
	offset: Maybe(int),
	buf: []byte,
	user: rawptr,
	callback: On_Write,
	all := false,
) -> ^Completion {
	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Write{
		fd       = fd,
		buf      = buf,
		callback = callback,
		offset   = offset.? or_else -1,
		len      = len(buf),
		all      = all,
	}

	queue.push_back(&io.completed, c)
	return c
}

// Registers a timeout; tracked on the `timeouts` list and expired by
// `flush_timeouts` on each tick.
_timeout :: proc(io: ^IO, dur: time.Duration, user: rawptr, callback: On_Timeout) -> ^Completion {
	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Timeout{
		callback = callback,
		expires  = time.time_add(time.now(), dur),
	}

	append(&io.timeouts, c)
	return c
}

// Schedules the callback to run on the next tick of the event loop.
_next_tick :: proc(io: ^IO, user: rawptr, callback: On_Next_Tick) -> ^Completion {
	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Next_Tick{
		callback = callback,
	}

	queue.push_back(&io.completed, c)
	return c
}

// Registers a (possibly multi-shot) readiness poll on `fd` with the kqueue.
_poll :: proc(io: ^IO, fd: os.Handle, event: Poll_Event, multi: bool, user: rawptr, callback: On_Poll) -> ^Completion {
	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.user_data = user
	c.operation = Op_Poll{
		fd       = fd,
		event    = event,
		multi    = multi,
		callback = callback,
	}

	append(&io.io_pending, c)
	return c
}

// Registers removal of a previously added poll for the `fd`+`event` pair.
_poll_remove :: proc(io: ^IO, fd: os.Handle, event: Poll_Event) -> ^Completion {
	c := pool_get(&io.completion_pool)
	c.ctx       = context
	c.operation = Op_Poll_Remove{
		fd    = fd,
		event = event,
	}

	append(&io.io_pending, c)
	return c
}


================================================
FILE: old_nbio/nbio_internal_darwin.odin
================================================
#+private
package nbio

import "base:runtime"

import "core:container/queue"
import "core:mem"
import "core:net"
import "core:os"
import "core:time"
import "core:sys/posix"
import "core:sys/kqueue"

// Max kevents submitted/received per `kevent` call in `flush`.
MAX_EVENTS :: 256

// Platform (darwin/kqueue) specific part of the `IO` state.
_IO :: struct {
	// The kqueue file descriptor.
	kq:              posix.FD,
	// Number of events currently registered with the kernel (see `flush`).
	io_inflight:     int,
	completion_pool: Pool(Completion),
	// Pending `Op_Timeout` completions, expired by `flush_timeouts`.
	timeouts:        [dynamic]^Completion,
	// Completions whose callback is ready to run.
	completed:       queue.Queue(^Completion),
	// Completions waiting on kernel readiness (submitted by `flush_io`).
	io_pending:      [dynamic]^Completion,
	allocator:       mem.Allocator,
}

// Platform specific part of `Completion`.
_Completion :: struct {
	operation: Operation,
	// Context captured at initiation, restored before the callback runs.
	ctx:       runtime.Context,
}

// Payload for accept operations.
Op_Accept :: struct {
	callback: On_Accept,
	sock:     net.TCP_Socket,
}

// Payload for close operations.
Op_Close :: struct {
	callback: On_Close,
	handle:   os.Handle,
}

// Payload for connect operations; `initiated` tracks whether os.connect has
// already been called (see `do_connect`).
Op_Connect :: struct {
	callback:  On_Connect,
	socket:    net.TCP_Socket,
	sockaddr:  os.SOCKADDR_STORAGE_LH,
	initiated: bool,
}

// Payload for recv operations; `received`/`len` track progress of `_all` recvs.
Op_Recv :: struct {
	callback: On_Recv,
	socket:   net.Any_Socket,
	buf:      []byte,
	all:      bool,
	received: int,
	len:      int,
}

// Payload for send operations; `sent`/`len` track progress of `_all` sends.
Op_Send :: struct {
	callback: On_Sent,
	socket:   net.Any_Socket,
	buf:      []byte,
	endpoint: Maybe(net.Endpoint),
	all:      bool,
	len:      int,
	sent:     int,
}

// Payload for read operations; a negative offset means "use the file cursor".
Op_Read :: struct {
	callback: On_Read,
	fd:       os.Handle,
	buf:      []byte,
	offset:	  int,
	all:   	  bool,
	read:  	  int,
	len:   	  int,
}

// Payload for write operations; a negative offset means "use the file cursor".
Op_Write :: struct {
	callback: On_Write,
	fd:       os.Handle,
	buf:      []byte,
	offset:   int,
	all:      bool,
	written:  int,
	len:      int,
}

// Payload for timeout operations.
Op_Timeout :: struct {
	callback: On_Timeout,
	expires:  time.Time,
}

// Payload for next-tick callbacks.
Op_Next_Tick :: struct {
	callback: On_Next_Tick,
}

// Payload for poll operations.
Op_Poll :: struct {
	callback: On_Poll,
	fd:       os.Handle,
	event:    Poll_Event,
	multi:    bool,
}

// Payload for poll-remove operations.
Op_Poll_Remove :: struct {
	fd:    os.Handle,
	event: Poll_Event,
}

// One pass of the darwin event loop:
// 1. expire due timeouts, 2. submit pending kevent changes and collect new
// events (blocking at most 10ms, or less when a timeout is due sooner),
// 3. run the callbacks of everything that completed.
flush :: proc(io: ^IO) -> os.Errno {
	events: [MAX_EVENTS]kqueue.KEvent

	min_timeout := flush_timeouts(io)
	change_events := flush_io(io, events[:])

	if (change_events > 0 || queue.len(io.completed) == 0) {
		// Nothing to submit, nothing completed and nothing in the kernel: fully idle.
		if (change_events == 0 && queue.len(io.completed) == 0 && io.io_inflight == 0) {
			return os.ERROR_NONE
		}

		max_timeout := time.Millisecond * 10
		ts: posix.timespec
		ts.tv_nsec = min(min_timeout.? or_else i64(max_timeout), i64(max_timeout))
		// NOTE: `events` doubles as the change list and the result list here.
		new_events, err := kqueue.kevent(io.kq, events[:change_events], events[:], &ts)
		if err != nil { return ev_err_to_os_err(err) }

		// PERF: this is ordered and O(N), can this be made unordered?
		remove_range(&io.io_pending, 0, change_events)

		io.io_inflight += change_events
		io.io_inflight -= int(new_events)

		if new_events > 0 {
			queue.reserve(&io.completed, int(new_events))
			for event in events[:new_events] {
				// udata was set to the ^Completion by `flush_io`.
				completion := cast(^Completion)event.udata
				queue.push_back(&io.completed, completion)
			}
		}
	}

	// Save length so we avoid an infinite loop when there is added to the queue in a callback.
	n := queue.len(io.completed)
	for _ in 0 ..< n {
		completed := queue.pop_front(&io.completed)
		context = completed.ctx

		switch &op in completed.operation {
		case Op_Accept:      do_accept     (io, completed, &op)
		case Op_Close:       do_close      (io, completed, &op)
		case Op_Connect:     do_connect    (io, completed, &op)
		case Op_Read:        do_read       (io, completed, &op)
		case Op_Recv:        do_recv       (io, completed, &op)
		case Op_Send:        do_send       (io, completed, &op)
		case Op_Write:       do_write      (io, completed, &op)
		case Op_Timeout:     do_timeout    (io, completed, &op)
		case Op_Next_Tick:   do_next_tick  (io, completed, &op)
		case Op_Poll:        do_poll       (io, completed, &op)
		case Op_Poll_Remove: do_poll_remove(io, completed, &op)
		case: unreachable()
		}
	}

	return os.ERROR_NONE
}

// Converts up to `len(events)` entries of `io.io_pending` into kevent change
// entries for submission.
//
// Returns the number of change entries filled; the caller submits
// `events[:returned]` and removes that many entries from `io.io_pending`.
flush_io :: proc(io: ^IO, events: []kqueue.KEvent) -> int {
	events := events
	events_loop: for &event, i in events {
		if len(io.io_pending) <= i { return i }
		completion := io.io_pending[i]

		switch op in completion.operation {
		case Op_Accept:
			event.ident = uintptr(op.sock)
			event.filter = .Read
		case Op_Connect:
			event.ident = uintptr(op.socket)
			event.filter = .Write
		case Op_Read:
			event.ident = uintptr(op.fd)
			event.filter = .Read
		case Op_Write:
			event.ident = uintptr(op.fd)
			// Fix: this previously registered `.Read`, so a write parked on
			// EWOULDBLOCK waited for *read* readiness and could stall forever;
			// a blocked write must wait for write readiness (EVFILT_WRITE),
			// the same as the Op_Send case below.
			event.filter = .Write
		case Op_Recv:
			event.ident = uintptr(os.Socket(net.any_socket_to_socket(op.socket)))
			event.filter = .Read
		case Op_Send:
			event.ident = uintptr(os.Socket(net.any_socket_to_socket(op.socket)))
			event.filter = .Write
		case Op_Poll:
			event.ident = uintptr(op.fd)
			switch op.event {
			case .Read:  event.filter = .Read
			case .Write: event.filter = .Write
			case:        unreachable()
			}

			// Multi polls stay registered; single polls are one-shot.
			event.flags = {.Add, .Enable}
			if !op.multi {
				event.flags += {.One_Shot}
			}

			event.udata = completion

			continue events_loop
		case Op_Poll_Remove:
			event.ident = uintptr(op.fd)
			switch op.event {
			case .Read:  event.filter = .Read
			case .Write: event.filter = .Write
			case:        unreachable()
			}

			event.flags = {.Delete, .Disable, .One_Shot}

			event.udata = completion

			continue events_loop
		case Op_Timeout, Op_Close, Op_Next_Tick:
			// These never wait on kernel readiness and must not reach io_pending.
			panic("invalid completion operation queued")
		}

		// Common flags/udata for the plain IO operations above.
		event.flags = {.Add, .Enable, .One_Shot}
		event.udata = completion
	}

	return len(events)
}

// Moves expired timeouts onto the completed queue and returns the smallest
// remaining timeout in nanoseconds (if any) so the kevent wait can wake up in
// time for it.
flush_timeouts :: proc(io: ^IO) -> (min_timeout: Maybe(i64)) {
	now: time.Time
	// PERF: is there a faster way to compare time? Or time since program start and compare that?
	if len(io.timeouts) > 0 { now = time.now() }

	// Iterate backwards so `ordered_remove` doesn't shift entries we still have to visit.
	for i := len(io.timeouts) - 1; i >= 0; i -= 1 {
		completion := io.timeouts[i]

		timeout, ok := &completion.operation.(Op_Timeout)
		if !ok { panic("non-timeout operation found in the timeouts queue") }

		unow := time.to_unix_nanoseconds(now)
		expires := time.to_unix_nanoseconds(timeout.expires)
		if unow >= expires {
			// Expired: hand it to the completed queue for callback dispatch.
			ordered_remove(&io.timeouts, i)
			queue.push_back(&io.completed, completion)
			continue
		}

		// Still pending: remember the soonest expiry.
		timeout_ns := expires - unow
		if min, has_min_timeout := min_timeout.(i64); has_min_timeout {
			if timeout_ns < min {
				min_timeout = timeout_ns
			}
		} else {
			min_timeout = timeout_ns
		}
	}

	return
}

// Attempts a non-blocking accept; on `Would_Block` the completion is parked in
// `io_pending` to be retried once the kqueue reports readiness.
do_accept :: proc(io: ^IO, completion: ^Completion, op: ^Op_Accept) {
	err: net.Network_Error
	client, source, accept_err := net.accept_tcp(op.sock)
	if accept_err == .Would_Block {
		append(&io.io_pending, completion)
		return
	} else if accept_err != nil {
		err = accept_err
	}

	// Prepare the accepted socket (see `_prepare_socket`) before handing it out.
	if err == nil {
		err = _prepare_socket(client)
	}

	if err != nil {
		net.close(client)
		op.callback(completion.user_data, {}, {}, err)
	} else {
		op.callback(completion.user_data, client, source, nil)
	}

	pool_put(&io.completion_pool, completion)
}

// Performs the queued close and reports success/failure to the callback.
do_close :: proc(io: ^IO, completion: ^Completion, op: ^Op_Close) {
	errno := os.close(op.handle)
	op.callback(completion.user_data, errno == os.ERROR_NONE)
	pool_put(&io.completion_pool, completion)
}

// Drives a non-blocking connect. The first call issues os.connect; on
// EINPROGRESS the completion is parked in `io_pending` and, once the kqueue
// signals writability, this runs again and fetches the result via
// getsockopt(SO_ERROR).
do_connect :: proc(io: ^IO, completion: ^Completion, op: ^Op_Connect) {
	defer op.initiated = true

	err: os.Errno
	if op.initiated {
		// We have already called os.connect, retrieve error number only.
		os.getsockopt(os.Socket(op.socket), os.SOL_SOCKET, os.SO_ERROR, &err, size_of(os.Errno))
	} else {
		err = os.connect(os.Socket(op.socket), (^os.SOCKADDR)(&op.sockaddr), i32(op.sockaddr.len))
		if err == os.EINPROGRESS {
			// In progress: wait for the kqueue to signal writability.
			append(&io.io_pending, completion)
			return
		}
	}

	if err != os.ERROR_NONE {
		net.close(op.socket)
		op.callback(completion.user_data, {}, net._dial_error())
	} else {
		op.callback(completion.user_data, op.socket, nil)
	}

	pool_put(&io.completion_pool, completion)
}

// Performs the queued read; retries via `io_pending` on EWOULDBLOCK and, for
// `_all` reads, keeps reading until the buffer is full or an error occurs.
do_read :: proc(io: ^IO, completion: ^Completion, op: ^Op_Read) {
	read: int
	err: os.Errno
	//odinfmt:disable
	switch {
	case op.offset >= 0: read, err = os.read_at(op.fd, op.buf, i64(op.offset))
	case:                read, err = os.read(op.fd, op.buf)
	}
	//odinfmt:enable

	op.read += read

	if err != os.ERROR_NONE {
		if err == os.EWOULDBLOCK {
			// Not ready: wait for the kqueue to signal readability.
			append(&io.io_pending, completion)
			return
		}

		op.callback(completion.user_data, op.read, err)
		pool_put(&io.completion_pool, completion)
		return
	}

	if op.all && op.read < op.len {
		// Short read: advance the buffer (and explicit offset) and read the remainder.
		op.buf = op.buf[read:]

		if op.offset >= 0 {
			op.offset += read
		}

		do_read(io, completion, op)
		return
	}

	op.callback(completion.user_data, op.read, os.ERROR_NONE)
	pool_put(&io.completion_pool, completion)
}

// Performs the queued recv (TCP or UDP); retries via `io_pending` on
// Would_Block and, for `_all` recvs, keeps receiving until the buffer is full.
do_recv :: proc(io: ^IO, completion: ^Completion, op: ^Op_Recv) {
	received: int
	err: net.Network_Error
	remote_endpoint: Maybe(net.Endpoint)
	switch sock in op.socket {
	case net.TCP_Socket:
		received, err = net.recv_tcp(sock, op.buf)

		if err == net.TCP_Recv_Error.Would_Block {
			append(&io.io_pending, completion)
			return
		}
	case net.UDP_Socket:
		received, remote_endpoint, err = net.recv_udp(sock, op.buf)

		if err == net.UDP_Recv_Error.Would_Block {
			append(&io.io_pending, completion)
			return
		}
	}

	op.received += received

	if err != nil {
		op.callback(completion.user_data, op.received, remote_endpoint, err)
		pool_put(&io.completion_pool, completion)
		return
	}

	if op.all && op.received < op.len {
		// Short recv: advance the buffer and receive the remainder.
		op.buf = op.buf[received:]
		do_recv(io, completion, op)
		return
	}

	op.callback(completion.user_data, op.received, remote_endpoint, err)
	pool_put(&io.completion_pool, completion)
}

// Performs the queued send (TCP or UDP); retries via `io_pending` on
// EWOULDBLOCK and, for `_all` sends, keeps sending until done.
do_send :: proc(io: ^IO, completion: ^Completion, op: ^Op_Send) {
	sent:  u32
	errno: os.Errno
	err:   net.Network_Error

	switch sock in op.socket {
	case net.TCP_Socket:
		sent, errno = os.send(os.Socket(sock), op.buf, 0)
		if errno != nil {
			err = net._tcp_send_error()
		}

	case net.UDP_Socket:
		// UDP needs an explicit destination (asserted at initiation in `_send`).
		toaddr := _endpoint_to_sockaddr(op.endpoint.(net.Endpoint))
		sent, errno = os.sendto(os.Socket(sock), op.buf, 0, cast(^os.SOCKADDR)&toaddr, i32(toaddr.len))
		if errno != nil {
			err = net._udp_send_error()
		}
	}

	op.sent += int(sent)

	if errno != os.ERROR_NONE {
		if errno == os.EWOULDBLOCK {
			// Not ready: wait for the kqueue to signal writability.
			append(&io.io_pending, completion)
			return
		}

		op.callback(completion.user_data, op.sent, err)
		pool_put(&io.completion_pool, completion)
		return
	}

	if op.all && op.sent < op.len {
		// Short send: advance the buffer and send the remainder.
		op.buf = op.buf[sent:]
		do_send(io, completion, op)
		return
	}

	op.callback(completion.user_data, op.sent, nil)
	pool_put(&io.completion_pool, completion)
}

// Performs the queued write; retries via `io_pending` on EWOULDBLOCK and, for
// `_all` writes, keeps writing until the whole buffer is written.
do_write :: proc(io: ^IO, completion: ^Completion, op: ^Op_Write) {
	written: int
	err: os.Errno
	//odinfmt:disable
	switch {
	case op.offset >= 0: written, err = os.write_at(op.fd, op.buf, i64(op.offset))
	case:                written, err = os.write(op.fd, op.buf)
	}
	//odinfmt:enable

	op.written += written

	if err != os.ERROR_NONE {
		if err == os.EWOULDBLOCK {
			append(&io.io_pending, completion)
			return
		}

		op.callback(completion.user_data, op.written, err)
		pool_put(&io.completion_pool, completion)
		return
	}

	// The write did not write the whole buffer, need to write more.
	if op.all && op.written < op.len {
		op.buf = op.buf[written:]

		// Increase offset so we don't overwrite what we just wrote.
		if op.offset >= 0 {
			op.offset += written
		}

		do_write(io, completion, op)
		return
	}

	op.callback(completion.user_data, op.written, os.ERROR_NONE)
	pool_put(&io.completion_pool, completion)
}

// A timeout expired (detected by `flush_timeouts`): run its callback and recycle.
do_timeout :: proc(io: ^IO, completion: ^Completion, op: ^Op_Timeout) {
	defer pool_put(&io.completion_pool, completion)
	op.callback(completion.user_data)
}

// A polled event fired; multi-shot polls keep their completion alive for the
// next event, single-shot polls are done.
do_poll :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll) {
	op.callback(completion.user_data, op.event)
	if op.multi { return }
	pool_put(&io.completion_pool, completion)
}

// The poll removal was submitted; the completion only needs recycling.
do_poll_remove :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll_Remove) {
	_ = op
	pool_put(&io.completion_pool, completion)
}

// Runs a callback that was scheduled for the next tick, then recycles.
do_next_tick :: proc(io: ^IO, completion: ^Completion, op: ^Op_Next_Tick) {
	defer pool_put(&io.completion_pool, completion)
	op.callback(completion.user_data)
}

// Converts a kqueue() creation errno into an os.Errno.
kq_err_to_os_err :: proc(err: posix.Errno) -> os.Errno {
	return os.Platform_Error(err)
}

// Converts a kevent() errno into an os.Errno.
ev_err_to_os_err :: proc(err: posix.Errno) -> os.Errno {
	return os.Platform_Error(err)
}

// Private proc in net package, verbatim copy.
// Kept byte-for-byte so behavior matches core:net exactly; do not restyle.
_endpoint_to_sockaddr :: proc(ep: net.Endpoint) -> (sockaddr: os.SOCKADDR_STORAGE_LH) {
	switch a in ep.address {
	case net.IP4_Address:
		(^os.sockaddr_in)(&sockaddr)^ = os.sockaddr_in {
			sin_port   = u16be(ep.port),
			sin_addr   = transmute(os.in_addr)a,
			sin_family = u8(os.AF_INET),
			sin_len    = size_of(os.sockaddr_in),
		}
		return
	case net.IP6_Address:
		(^os.sockaddr_in6)(&sockaddr)^ = os.sockaddr_in6 {
			sin6_port   = u16be(ep.port),
			sin6_addr   = transmute(os.in6_addr)a,
			sin6_family = u8(os.AF_INET6),
			sin6_len    = size_of(os.sockaddr_in6),
		}
		return
	}
	unreachable()
}


================================================
FILE: old_nbio/nbio_internal_linux.odin
================================================
#+private
package nbio

import "base:runtime"

import "core:c"
import "core:container/queue"
import "core:fmt"
import "core:mem"
import "core:net"
import "core:os"
import "core:sys/linux"

import io_uring "_io_uring"

NANOSECONDS_PER_SECOND :: 1e+9

// Platform (linux/io_uring) specific part of the `IO` state.
_IO :: struct {
	ring:            io_uring.IO_Uring,
	completion_pool: Pool(Completion),
	// Ready to be submitted to kernel.
	unqueued:        queue.Queue(^Completion),
	// Ready to run callbacks.
	completed:       queue.Queue(^Completion),
	// SQEs queued but not yet submitted via io_uring.submit.
	ios_queued:      u64,
	// Submitted operations whose CQE has not been collected yet.
	ios_in_kernel:   u64,
	allocator:       mem.Allocator,
}

// Platform specific part of `Completion`.
_Completion :: struct {
	// The CQE `res` field: >= 0 on success, negated errno on failure.
	result:    i32,
	operation: Operation,
	// Context captured at initiation, restored before the callback runs.
	ctx:       runtime.Context,
}

// Payload for accept operations; sockaddr is filled in by the kernel.
Op_Accept :: struct {
	callback:    On_Accept,
	socket:      net.TCP_Socket,
	sockaddr:    os.SOCKADDR_STORAGE_LH,
	sockaddrlen: c.int,
}

// Payload for close operations.
Op_Close :: struct {
	callback: On_Close,
	fd:       os.Handle,
}

// Payload for connect operations.
Op_Connect :: struct {
	callback: On_Connect,
	socket:   net.TCP_Socket,
	sockaddr: os.SOCKADDR_STORAGE_LH,
}

// Payload for read operations; a negative offset means "use the file cursor"
// (see `read_enqueue`). `read`/`len` track progress of `_all` reads.
Op_Read :: struct {
	callback: On_Read,
	fd:       os.Handle,
	buf:      []byte,
	offset:   int,
	all:      bool,
	read:     int,
	len:      int,
}

// Payload for write operations; `written`/`len` track progress of `_all` writes.
Op_Write :: struct {
	callback: On_Write,
	fd:       os.Handle,
	buf:      []byte,
	offset:   int,
	all:      bool,
	written:  int,
	len:      int,
}

// Payload for send operations; `sent`/`len` track progress of `_all` sends.
Op_Send :: struct {
	callback: On_Sent,
	socket:   net.Any_Socket,
	buf:      []byte,
	len:      int,
	sent:     int,
	all:      bool,
}

// Payload for recv operations; `received`/`len` track progress of `_all` recvs.
Op_Recv :: struct {
	callback: On_Recv,
	socket:   net.Any_Socket,
	buf:      []byte,
	all:      bool,
	received: int,
	len:      int,
}

// Payload for timeout operations.
Op_Timeout :: struct {
	callback: On_Timeout,
	expires:  linux.Time_Spec,
}

// Payload for next-tick callbacks.
Op_Next_Tick :: struct {
	callback: On_Next_Tick,
}

// Payload for poll operations.
Op_Poll :: struct {
	callback: On_Poll,
	fd:       os.Handle,
	event:    Poll_Event,
	multi:    bool,
}

// Payload for poll-remove operations.
Op_Poll_Remove :: struct {
	fd:    os.Handle,
	event: Poll_Event,
}

// One pass of the linux io_uring event loop: submit queued SQEs (waiting for
// `wait_nr` completions), drain CQEs, re-enqueue operations that previously
// hit a full submission queue, then run the callbacks of everything completed.
flush :: proc(io: ^IO, wait_nr: u32, timeouts: ^uint, etime: ^bool) -> os.Errno {
	err := flush_submissions(io, wait_nr, timeouts, etime)
	if err != os.ERROR_NONE { return err }

	err = flush_completions(io, 0, timeouts, etime)
	if err != os.ERROR_NONE { return err }

	// Store length at this time, so we don't infinite loop if any of the enqueue
	// procs below then add to the queue again.
	n := queue.len(io.unqueued)

	// odinfmt: disable
	for _ in 0..<n {
		unqueued := queue.pop_front(&io.unqueued)
		switch &op in unqueued.operation {
		case Op_Accept:      accept_enqueue     (io, unqueued, &op)
		case Op_Close:       close_enqueue      (io, unqueued, &op)
		case Op_Connect:     connect_enqueue    (io, unqueued, &op)
		case Op_Read:        read_enqueue       (io, unqueued, &op)
		case Op_Recv:        recv_enqueue       (io, unqueued, &op)
		case Op_Send:        send_enqueue       (io, unqueued, &op)
		case Op_Write:       write_enqueue      (io, unqueued, &op)
		case Op_Timeout:     timeout_enqueue    (io, unqueued, &op)
		case Op_Poll:        poll_enqueue       (io, unqueued, &op)
		case Op_Poll_Remove: poll_remove_enqueue(io, unqueued, &op)
		case Op_Next_Tick:   unreachable()
		}
	}

	// Same trick: snapshot the length before running callbacks, which may enqueue more.
	n = queue.len(io.completed)
	for _ in 0 ..< n {
		completed := queue.pop_front(&io.completed)
		context = completed.ctx

		switch &op in completed.operation {
		case Op_Accept:      accept_callback     (io, completed, &op)
		case Op_Close:       close_callback      (io, completed, &op)
		case Op_Connect:     connect_callback    (io, completed, &op)
		case Op_Read:        read_callback       (io, completed, &op)
		case Op_Recv:        recv_callback       (io, completed, &op)
		case Op_Send:        send_callback       (io, completed, &op)
		case Op_Write:       write_callback      (io, completed, &op)
		case Op_Timeout:     timeout_callback    (io, completed, &op)
		case Op_Poll:        poll_callback       (io, completed, &op)
		case Op_Poll_Remove: poll_remove_callback(io, completed, &op)
		case Op_Next_Tick:   next_tick_callback  (io, completed, &op)
		case: unreachable()
		}
	}
	// odinfmt: enable

	return os.ERROR_NONE
}

// Copies CQEs out of the ring (waiting for at least `wait_nr`) onto the
// completed queue. CQEs with user_data == 0 are internal timeout markers:
// they decrement `timeouts` and set `etime` when the kernel reports ETIME.
flush_completions :: proc(io: ^IO, wait_nr: u32, timeouts: ^uint, etime: ^bool) -> os.Errno {
	cqes: [256]io_uring.io_uring_cqe
	wait_remaining := wait_nr
	for {
		completed, err := io_uring.copy_cqes(&io.ring, cqes[:], wait_remaining)
		if err != .None { return ring_err_to_os_err(err) }

		if wait_remaining < completed {
			wait_remaining = 0
		} else {
			wait_remaining -= completed
		}

		if completed > 0 {
			queue.reserve(&io.completed, int(completed))
			for cqe in cqes[:completed] {
				io.ios_in_kernel -= 1

				if cqe.user_data == 0 {
					timeouts^ -= 1

					// A negative res carries the errno; ETIME means the wait timed out.
					if (-cqe.res == i32(os.ETIME)) {
						etime^ = true
					}
					continue
				}

				// user_data carries the ^Completion set at enqueue time.
				completion := cast(^Completion)uintptr(cqe.user_data)
				completion.result = cqe.res

				queue.push_back(&io.completed, completion)
			}
		}

		// A full batch may mean more CQEs are ready; keep draining.
		if completed < len(cqes) { break }
	}

	return os.ERROR_NONE
}

// Submits all queued SQEs; retries on EINTR, and drains at least one
// completion before retrying when the kernel reports overcommit/resource pressure.
flush_submissions :: proc(io: ^IO, wait_nr: u32, timeouts: ^uint, etime: ^bool) -> os.Errno {
	for {
		submitted, err := io_uring.submit(&io.ring, wait_nr)
		#partial switch err {
		case .None:
			break // Breaks the switch only; bookkeeping below then exits the loop.
		case .Signal_Interrupt:
			continue
		case .Completion_Queue_Overcommitted, .System_Resources:
			ferr := flush_completions(io, 1, timeouts, etime)
			if ferr != os.ERROR_NONE { return ferr }
			continue
		case:
			return ring_err_to_os_err(err)
		}

		io.ios_queued -= u64(submitted)
		io.ios_in_kernel += u64(submitted)
		break
	}

	return os.ERROR_NONE
}

// Queues an io_uring accept SQE; falls back to the unqueued list when the
// submission queue is full.
accept_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Accept) {
	_, sqe_err := io_uring.accept(
		&io.ring,
		u64(uintptr(completion)),
		os.Socket(op.socket),
		cast(^os.SOCKADDR)&op.sockaddr,
		&op.sockaddrlen,
	)
	if sqe_err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
	} else {
		io.ios_queued += 1
	}
}

// Handles an accept CQE: retries transient errors, otherwise prepares the new
// socket and reports it (with the source endpoint) to the callback.
accept_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Accept) {
	if completion.result < 0 {
		// A negative res carries the errno.
		errno := os.Platform_Error(-completion.result)
		#partial switch errno {
		case .EINTR, .EWOULDBLOCK:
			accept_enqueue(io, completion, op)
		case:
			op.callback(completion.user_data, 0, {}, net._accept_error(errno))
			pool_put(&io.completion_pool, completion)
		}
		return
	}

	// A non-negative result is the new socket's fd.
	client := net.TCP_Socket(completion.result)
	err    := _prepare_socket(client)
	source := sockaddr_storage_to_endpoint(&op.sockaddr)

	op.callback(completion.user_data, client, source, err)
	pool_put(&io.completion_pool, completion)
}

// Queues an io_uring close SQE; falls back to the unqueued list when the
// submission queue is full.
close_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Close) {
	_, sqe_err := io_uring.close(&io.ring, u64(uintptr(completion)), op.fd)
	if sqe_err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
	} else {
		io.ios_queued += 1
	}
}

// Reports the result of an io_uring close to the callback; never retried.
close_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Close) {
	errno := os.Platform_Error(-completion.result)

	// In particular close() should not be retried after an EINTR
	// since this may cause a reused descriptor from another thread to be closed.
	op.callback(completion.user_data, errno == .NONE || errno == .EINTR)
	pool_put(&io.completion_pool, completion)
}

// Queues an io_uring connect SQE; falls back to the unqueued list when the
// submission queue is full.
connect_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Connect) {
	_, sqe_err := io_uring.connect(
		&io.ring,
		u64(uintptr(completion)),
		os.Socket(op.socket),
		cast(^os.SOCKADDR)&op.sockaddr,
		size_of(op.sockaddr),
	)
	if sqe_err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
	} else {
		io.ios_queued += 1
	}
}

// Handles a connect CQE: retries transient errors, reports the connected
// socket on success and closes it on failure.
connect_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Connect) {
	// A negative res carries the errno; 0 means success (.NONE).
	errno := os.Platform_Error(-completion.result)
	#partial switch errno {
	case .EINTR, .EWOULDBLOCK:
		connect_enqueue(io, completion, op)
		return
	case .NONE:
		op.callback(completion.user_data, op.socket, nil)
	case:
		net.close(op.socket)
		op.callback(completion.user_data, {}, net._dial_error(errno))
	}
	pool_put(&io.completion_pool, completion)
}

// Submits a read operation to the ring; when the submission queue is full
// the completion is parked on the unqueued list to be retried later.
read_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Read) {
	// A negative offset means "use the file's current cursor", which io_uring
	// expects encoded as the maximum u64 value.
	cursor := u64(op.offset) if op.offset >= 0 else max(u64)

	if _, err := io_uring.read(&io.ring, u64(uintptr(completion)), op.fd, op.buf, cursor); err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
		return
	}

	io.ios_queued += 1
}

// Completion handler for a read submission.
//
// Retries on EINTR/EWOULDBLOCK, reports any other errno to the user callback.
// When `op.all` is set, short reads advance the buffer and resubmit until
// `op.len` bytes have been read in total.
read_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Read) {
	if completion.result < 0 {
		errno := os.Platform_Error(-completion.result)
		#partial switch errno {
		case .EINTR, .EWOULDBLOCK:
			// Transient, resubmit the read.
			read_enqueue(io, completion, op)
		case:
			op.callback(completion.user_data, op.read, errno)
			pool_put(&io.completion_pool, completion)
		}
		return
	}

	// A non-negative result is the number of bytes read by this submission.
	op.read += int(completion.result)

	if op.all && op.read < op.len {
		// Short read: slice off what was read and submit the remainder.
		op.buf = op.buf[completion.result:]
		read_enqueue(io, completion, op)
		return
	}

	op.callback(completion.user_data, op.read, os.ERROR_NONE)
	pool_put(&io.completion_pool, completion)
}

// Submits a recv operation to the ring; when the submission queue is full
// the completion is parked on the unqueued list to be retried later.
// Only TCP sockets are supported.
recv_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Recv) {
	tcpsock, is_tcp := op.socket.(net.TCP_Socket)
	if !is_tcp {
		// TODO: figure out and implement.
		unimplemented("UDP recv is unimplemented for linux nbio")
	}

	if _, err := io_uring.recv(&io.ring, u64(uintptr(completion)), os.Socket(tcpsock), op.buf, 0); err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
		return
	}
	// TODO: handle other errors, also in other enqueue procs.

	io.ios_queued += 1
}

// Completion handler for a recv submission.
//
// Retries on EINTR/EWOULDBLOCK, reports any other errno to the user callback.
// When `op.all` is set, short receives advance the buffer and resubmit until
// `op.len` bytes have been received in total.
recv_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Recv) {
	if completion.result < 0 {
		errno := os.Platform_Error(-completion.result)
		#partial switch errno {
		case .EINTR, .EWOULDBLOCK:
			// Transient, resubmit the recv.
			recv_enqueue(io, completion, op)
		case:
			// `{}` for the remote endpoint: only TCP reaches here
			// (recv_enqueue rejects UDP), so there is no per-packet source.
			op.callback(completion.user_data, op.received, {}, net._tcp_recv_error(errno))
			pool_put(&io.completion_pool, completion)
		}
		return
	}

	// A non-negative result is the number of bytes received by this submission.
	op.received += int(completion.result)

	if op.all && op.received < op.len {
		// Short receive: slice off what was received and submit the remainder.
		op.buf = op.buf[completion.result:]
		recv_enqueue(io, completion, op)
		return
	}

	op.callback(completion.user_data, op.received, {}, nil)
	pool_put(&io.completion_pool, completion)
}

// Submits a send operation to the ring; when the submission queue is full
// the completion is parked on the unqueued list to be retried later.
// Only TCP sockets are supported.
send_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Send) {
	tcpsock, is_tcp := op.socket.(net.TCP_Socket)
	if !is_tcp {
		// TODO: figure out and implement.
		unimplemented("UDP send is unimplemented for linux nbio")
	}

	if _, err := io_uring.send(&io.ring, u64(uintptr(completion)), os.Socket(tcpsock), op.buf, 0); err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
		return
	}

	io.ios_queued += 1
}

// Completion handler for a send submission.
//
// Retries on EINTR/EWOULDBLOCK, reports any other errno to the user callback.
// When `op.all` is set, short sends advance the buffer and resubmit until
// `op.len` bytes have been sent in total.
send_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Send) {
	if completion.result < 0 {
		errno := os.Platform_Error(-completion.result)
		#partial switch errno {
		case .EINTR, .EWOULDBLOCK:
			// Transient, resubmit the send.
			send_enqueue(io, completion, op)
		case:
			op.callback(completion.user_data, op.sent, net._tcp_send_error(errno))
			pool_put(&io.completion_pool, completion)
		}
		return
	}

	// A non-negative result is the number of bytes sent by this submission.
	op.sent += int(completion.result)

	if op.all && op.sent < op.len {
		// Short send: slice off what was sent and submit the remainder.
		op.buf = op.buf[completion.result:]
		send_enqueue(io, completion, op)
		return
	}

	op.callback(completion.user_data, op.sent, nil)
	pool_put(&io.completion_pool, completion)
}

// Submits a write operation to the ring; when the submission queue is full
// the completion is parked on the unqueued list to be retried later.
write_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Write) {
	// A negative offset means "use the file's current cursor", which io_uring
	// expects encoded as the maximum u64 value.
	cursor := u64(op.offset) if op.offset >= 0 else max(u64)

	if _, err := io_uring.write(&io.ring, u64(uintptr(completion)), op.fd, op.buf, cursor); err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
		return
	}

	io.ios_queued += 1
}

// Completion handler for a write submission.
//
// Retries on EINTR/EWOULDBLOCK, reports any other errno to the user callback.
// When `op.all` is set, short writes advance the buffer (and the explicit
// offset, when one was given) and resubmit until `op.len` bytes are written.
write_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Write) {
	if completion.result < 0 {
		errno := os.Platform_Error(-completion.result)
		#partial switch errno {
		case .EINTR, .EWOULDBLOCK:
			// Transient, resubmit the write.
			write_enqueue(io, completion, op)
		case:
			op.callback(completion.user_data, op.written, errno)
			pool_put(&io.completion_pool, completion)
		}
		return
	}

	// A non-negative result is the number of bytes written by this submission.
	op.written += int(completion.result)

	if op.all && op.written < op.len {
		// Short write: submit the remainder of the buffer.
		op.buf = op.buf[completion.result:]

		// A negative offset means the file cursor is used (see write_enqueue);
		// an explicit offset has to be advanced manually for the resubmission.
		if op.offset >= 0 {
			op.offset += int(completion.result)
		}

		write_enqueue(io, completion, op)
		return
	}

	op.callback(completion.user_data, op.written, os.ERROR_NONE)
	pool_put(&io.completion_pool, completion)
}

// Submits a timeout operation to the ring; when the submission queue is full
// the completion is parked on the unqueued list to be retried later.
timeout_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Timeout) {
	if _, err := io_uring.timeout(&io.ring, u64(uintptr(completion)), &op.expires, 0, 0); err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
		return
	}

	io.ios_queued += 1
}

// Completion handler for a timeout submission: ETIME is the expected
// "timer expired" result, transient errors are resubmitted, anything else
// is considered a programming error and panics.
timeout_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Timeout) {
	if completion.result < 0 {
		err_code := os.Platform_Error(-completion.result)

		if err_code == .EINTR || err_code == .EWOULDBLOCK {
			// Transient, resubmit and keep the completion alive.
			timeout_enqueue(io, completion, op)
			return
		}

		if err_code != .ETIME {
			fmt.panicf("timeout error: %v", err_code)
		}
	}

	op.callback(completion.user_data)
	pool_put(&io.completion_pool, completion)
}

// Completion handler for a next-tick operation: invokes the user callback
// immediately (there is no kernel submission involved) and recycles the
// completion.
next_tick_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Next_Tick) {
	op.callback(completion.user_data)
	pool_put(&io.completion_pool, completion)
}

// Submits a poll-add operation for the requested event (optionally multishot);
// when the submission queue is full the completion is parked on the unqueued
// list to be retried later.
poll_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll) {
	events: linux.Fd_Poll_Events
	switch op.event {
	case .Read:  events = {.IN}
	case .Write: events = {.OUT}
	}

	flags: io_uring.IORing_Poll_Flags
	if op.multi {
		// Multishot: the poll stays armed and produces multiple completions.
		flags = {.ADD_MULTI}
	}

	if _, err := io_uring.poll_add(&io.ring, u64(uintptr(completion)), op.fd, events, flags); err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
		return
	}

	io.ios_queued += 1
}

// Completion handler for a poll submission: notifies the user callback of the
// event that fired.
poll_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll) {
	op.callback(completion.user_data, op.event)

	// A multishot poll will fire again, so its completion must stay alive;
	// single-shot polls are done and can be recycled.
	if op.multi do return
	pool_put(&io.completion_pool, completion)
}

// Submits a poll-remove operation for the given fd/event pair; when the
// submission queue is full the completion is parked on the unqueued list to
// be retried later.
poll_remove_enqueue :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll_Remove) {
	events: linux.Fd_Poll_Events
	switch op.event {
	case .Read:  events = {.IN}
	case .Write: events = {.OUT}
	}

	if _, err := io_uring.poll_remove(&io.ring, u64(uintptr(completion)), op.fd, events); err == .Submission_Queue_Full {
		queue.push_back(&io.unqueued, completion)
		return
	}

	io.ios_queued += 1
}

// Completion handler for a poll removal: there is no user-facing result to
// report, the completion is simply returned to the pool.
poll_remove_callback :: proc(io: ^IO, completion: ^Completion, op: ^Op_Poll_Remove) {
	pool_put(&io.completion_pool, completion)
}

// Maps an io_uring package error onto the closest matching OS errno.
// Unknown/unexpected errors map to Platform_Error(-1).
ring_err_to_os_err :: proc(err: io_uring.IO_Uring_Error) -> os.Errno {
	#partial switch err {
	case .None:
		return os.ERROR_NONE
	case .Params_Outside_Accessible_Address_Space, .Buffer_Invalid, .File_Descriptor_Invalid, .Submission_Queue_Entry_Invalid, .Ring_Shutting_Down:
		return os.EFAULT
	case .Arguments_Invalid, .Entries_Zero, .Entries_Too_Large, .Entries_Not_Power_Of_Two, .Opcode_Not_Supported:
		return os.EINVAL
	case .Process_Fd_Quota_Exceeded:
		return os.EMFILE
	case .System_Fd_Quota_Exceeded:
		return os.ENFILE
	case .System_Resources, .Completion_Queue_Overcommitted:
		return os.ENOMEM
	case .Permission_Denied:
		return os.EPERM
	case .System_Outdated:
		return os.ENOSYS
	case .Submission_Queue_Full:
		return os.EOVERFLOW
	case .Signal_Interrupt:
		return os.EINTR
	}

	// .Unexpected and anything not matched above.
	return os.Platform_Error(-1)
}

// verbatim copy of net._sockaddr_storage_to_endpoint.
//
// Converts a native socket address (e.g. one filled in by accept) into a
// net.Endpoint. Panics when the address family is neither AF_INET nor
// AF_INET6. Kept byte-identical to the core:net internal on purpose —
// do not "improve" this without diffing against the upstream proc.
sockaddr_storage_to_endpoint :: proc(native_addr: ^os.SOCKADDR_STORAGE_LH) -> (ep: net.Endpoint) {
	switch native_addr.ss_family {
	case u16(os.AF_INET):
		addr := cast(^os.sockaddr_in)native_addr
		port := int(addr.sin_port)
		ep = net.Endpoint {
			address = net.IP4_Address(transmute([4]byte)addr.sin_addr),
			port    = port,
		}
	case u16(os.AF_INET6):
		addr := cast(^os.sockaddr_in6)native_addr
		port := int(addr.sin6_port)
		ep = net.Endpoint {
			address = net.IP6_Address(transmute([8]u16be)addr.sin6_addr),
			port    = port,
		}
	case:
		panic("native_addr is neither IP4 or IP6 address")
	}
	return
}

// verbatim copy of net._endpoint_to_sockaddr.
//
// Converts a net.Endpoint into a native sockaddr storage struct suitable for
// passing to the kernel (e.g. for connect). The address union is exhaustive
// (IP4/IP6), so the trailing unreachable() can never fire. Kept byte-identical
// to the core:net internal on purpose — do not "improve" this without diffing
// against the upstream proc.
endpoint_to_sockaddr :: proc(ep: net.Endpoint) -> (sockaddr: os.SOCKADDR_STORAGE_LH) {
	switch a in ep.address {
	case net.IP4_Address:
		(^os.sockaddr_in)(&sockaddr)^ = os.sockaddr_in {
			sin_family = u16(os.AF_INET),
			sin_port   = u16be(ep.port),
			sin_addr   = transmute(os.in_addr)a,
		}
		return
	case net.IP6_Address:
		(^os.sockaddr_in6)(&sockaddr)^ = os.sockaddr_in6 {
			sin6_family = u16(os.AF_INET6),
			sin6_port   = u16be(ep.port),
			sin6_addr   = transmute(os.in6_addr)a,
		}
		return
	}
	unreachable()
}


================================================
FILE: old_nbio/nbio_internal_windows.odin
================================================
#+private
package nbio

import "base:runtime"

import "core:container/queue"
import "core:log"
import "core:mem"
import "core:net"
import "core:os"
import "core:time"

import win "core:sys/windows"

// Windows-specific state backing the public IO type, built on an IO
// completion port (IOCP).
_IO :: struct {
	iocp:            win.HANDLE,            // the IO completion port handle
	allocator:       mem.Allocator,         // allocator for this IO instance's structures
	timeouts:        [dynamic]^Completion,  // completions for timeout operations (presumably scanned by the tick loop — not visible in this chunk)
	completed:       queue.Queue(^Completion),  // completions whose callbacks are ready to run
	completion_pool: Pool(Completion),      // recycled Completion objects
	io_pending:      int,                   // count of in-flight asynchronous operations
	// The asynchronous Windows API's don't support reading at the current offset of a file, so we keep track ourselves.
	offsets:         map[os.Handle]u32,
}

// Per-operation state for the Windows backend.
//
// `over` must be the very first field: presumably the ^OVERLAPPED handed back
// by the completion port is cast directly back to ^Completion, which only
// works when both share the same address (enforced by the #assert below —
// confirm against the dequeue code, which is not visible in this chunk).
_Completion :: struct {
	over: win.OVERLAPPED,  // the OVERLAPPED passed to the async Win32 calls
	ctx:  runtime.Context, // context to restore when running the callback
	op:   Operation,       // the operation-specific payload (Op_* union)
}
#assert(offset_of(Completion, over) == 0, "needs to be the first field to work")

// State for an in-flight accept on the Windows backend.
Op_Accept :: struct {
	callback: On_Accept,                // user callback invoked on completion
	socket:   win.SOCKET,               // the listening socket
	client:   win.SOCKET,               // socket the connection is accepted into
	addr:     win.SOCKADDR_STORAGE_LH,  // storage for the peer's address
	pending:  bool,                     // whether the overlapped call was already submitted — TODO confirm against the accept impl (not visible here)
}

// State for an in-flight connect on the Windows backend.
Op_Connect :: struct {
	callback: On_Connect,               // user callback invoked on completion
	socket:   win.SOCKET,               // the socket being connected
	addr:     win.SOCKADDR_STORAGE_LH,  // the target address
	pending:  bool,                     // whether the overlapped call was already submitted — TODO confirm against the connect impl (not visible here)
}

// State for an in-flight close on the Windows backend.
Op_Close :: struct {
	callback: On_Close,  // user callback invoked on completion
	fd:       Closable,  // the handle/socket to close
}

// State for an in-flight read on the Windows backend.
Op_Read :: struct {
	callback: On_Read,    // user callback invoked on completion
	fd:       os.Handle,  // file handle being read from
	offset:   int,        // read offset (the backend tracks cursors itself; see _IO.offsets)
	buf:      []byte,     // destination buffer
	pending:  bool,       // whether the overlapped call was already submitted — TODO confirm against the read impl (not visible here)
	all:      bool,       // when set, keep reading until `len` bytes are read
	read:     int,        // bytes read so far across resubmissions
	len:      int,        // total bytes requested when `all` is set
}

// State for an in-flight write on the Windows backend.
Op_Write :: struct {
	callback: On_Write,   // user callback invoked on completion
	fd:       os.Handle,  // file handle being written to
	offset:   int,        // write offset (the backend tracks cursors itself; see _IO.offsets)
	buf:      []byte,     // source buffer
	pending:  bool,       // whether the overlapped call was already submitted — TODO confirm against the write impl (not visible here)

	written:  int,        // bytes written so far across resubmissions
	len:      int,        // total bytes requested when `all` is set
	all:      bool,       // when set, keep writing until `len` bytes are written
}

Op_R
Download .txt
gitextract_m0_ssysi/

├── .editorconfig
├── .github/
│   └── workflows/
│       ├── ci.yml
│       ├── docs.yml
│       └── openssl.yml
├── .gitignore
├── LICENSE
├── README.md
├── allocator.odin
├── body.odin
├── client/
│   ├── client.odin
│   └── communication.odin
├── comparisons/
│   └── empty-ok-all/
│       ├── README.md
│       ├── bun/
│       │   ├── .gitignore
│       │   ├── bun.lockb
│       │   ├── index.ts
│       │   ├── package.json
│       │   └── tsconfig.json
│       ├── go/
│       │   └── main.go
│       ├── node/
│       │   └── app.js
│       ├── odin/
│       │   └── main.odin
│       └── rust/
│           ├── .gitignore
│           ├── Cargo.toml
│           └── src/
│               └── main.rs
├── cookie.odin
├── docs/
│   ├── all.odin
│   ├── generate.sh
│   └── odin-doc.json
├── examples/
│   ├── client/
│   │   └── main.odin
│   └── tcp_echo/
│       └── main.odin
├── handlers.odin
├── headers.odin
├── http.odin
├── mimes.odin
├── mod.pkg
├── odinfmt.json
├── old_nbio/
│   ├── README.md
│   ├── _io_uring/
│   │   ├── os.odin
│   │   └── sys.odin
│   ├── doc.odin
│   ├── nbio.odin
│   ├── nbio_darwin.odin
│   ├── nbio_internal_darwin.odin
│   ├── nbio_internal_linux.odin
│   ├── nbio_internal_windows.odin
│   ├── nbio_linux.odin
│   ├── nbio_test.odin
│   ├── nbio_unix.odin
│   ├── nbio_windows.odin
│   ├── poly/
│   │   └── poly.odin
│   └── pool.odin
├── openssl/
│   ├── .version
│   ├── includes/
│   │   └── windows/
│   │       ├── libcrypto.lib
│   │       ├── libcrypto_static.lib
│   │       ├── libssl.lib
│   │       └── libssl_static.lib
│   └── openssl.odin
├── request.odin
├── response.odin
├── responses.odin
├── routing.odin
├── scanner.odin
├── server.odin
└── status.odin
Download .txt
SYMBOL INDEX (4 symbols across 3 files)

FILE: comparisons/empty-ok-all/bun/index.ts
  method fetch (line 3) | fetch(req) {

FILE: comparisons/empty-ok-all/go/main.go
  function main (line 7) | func main() {
  function HelloServer (line 12) | func HelloServer(w http.ResponseWriter, r *http.Request) {}

FILE: comparisons/empty-ok-all/rust/src/main.rs
  function main (line 4) | async fn main() -> std::io::Result<()> {
Condensed preview — 63 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (365K chars).
[
  {
    "path": ".editorconfig",
    "chars": 175,
    "preview": "root = true\n\n[*]\nend_of_line = lf\ninsert_final_newline = true\nindent_style = tab\nindent_size = 4\ntrim_trailing_whitespac"
  },
  {
    "path": ".github/workflows/ci.yml",
    "chars": 737,
    "preview": "name: CI\non:\n  push:\n  workflow_dispatch:\n  schedule:\n    - cron: 0 20 * * *\n\nenv:\n  FORCE_COLOR: \"1\"\n\njobs:\n  check:\n  "
  },
  {
    "path": ".github/workflows/docs.yml",
    "chars": 1223,
    "preview": "name: Deploy docs to GitHub pages\n\non:\n  push:\n    branches: [main]\n  workflow_dispatch:\n\nenv:\n  FORCE_COLOR: \"1\"\n\npermi"
  },
  {
    "path": ".github/workflows/openssl.yml",
    "chars": 4165,
    "preview": "name: OpenSSL\non:\n  push:\n    paths: [\".github/workflows/openssl.yml\"]\n    branches: [\"main\"]\n  workflow_dispatch:\n  sch"
  },
  {
    "path": ".gitignore",
    "chars": 102,
    "preview": "*.bin\nols.json\nopm\nTaskfile.yml\n*.exe\ndocs/build\n\n# Example binaries.\nminimal\ncomplete\nreadme\nrouting\n"
  },
  {
    "path": "LICENSE",
    "chars": 1056,
    "preview": "Copyright (c) 2023 Laytan Laats\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this so"
  },
  {
    "path": "README.md",
    "chars": 6831,
    "preview": "# Odin HTTP\n\nA HTTP/1.1 implementation for Odin purely written in Odin (besides SSL).\n\nSee generated package documentati"
  },
  {
    "path": "allocator.odin",
    "chars": 7077,
    "preview": "#+private\n#+build ignore\npackage http\n\n// NOTE: currently not in use, had a strange crash I can't figure out.\n\nimport \"c"
  },
  {
    "path": "body.odin",
    "chars": 8629,
    "preview": "package http\n\nimport \"core:bufio\"\nimport \"core:io\"\nimport \"core:log\"\nimport \"core:net\"\nimport \"core:strconv\"\nimport \"cor"
  },
  {
    "path": "client/client.odin",
    "chars": 13805,
    "preview": "// package provides a very simple (for now) HTTP/1.1 client.\npackage client\n\nimport \"core:bufio\"\nimport \"core:bytes\"\nimp"
  },
  {
    "path": "client/communication.odin",
    "chars": 7222,
    "preview": "#+private\npackage client\n\nimport \"core:bufio\"\nimport \"core:bytes\"\nimport \"core:c\"\nimport \"core:io\"\nimport \"core:log\"\nimp"
  },
  {
    "path": "comparisons/empty-ok-all/README.md",
    "chars": 1557,
    "preview": "# Comparison - Empty OK All\n\nThis comparison measures raw IO rate, the server needs to respond to requests on port :8080"
  },
  {
    "path": "comparisons/empty-ok-all/bun/.gitignore",
    "chars": 2172,
    "preview": "# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore\n\n# Logs\n\nlogs\n_.log\nnpm-debug.log_\nyar"
  },
  {
    "path": "comparisons/empty-ok-all/bun/index.ts",
    "chars": 152,
    "preview": "const server = Bun.serve({\n\tport: 8080,\n\tfetch(req) {\n\t\treturn new Response();\n\t},\n});\n\nconsole.log(`Listening on http:/"
  },
  {
    "path": "comparisons/empty-ok-all/bun/package.json",
    "chars": 182,
    "preview": "{\n  \"name\": \"empty-ok-all\",\n  \"module\": \"index.ts\",\n  \"type\": \"module\",\n  \"devDependencies\": {\n    \"bun-types\": \"latest\""
  },
  {
    "path": "comparisons/empty-ok-all/bun/tsconfig.json",
    "chars": 519,
    "preview": "{\n  \"compilerOptions\": {\n    \"lib\": [\"ESNext\"],\n    \"module\": \"esnext\",\n    \"target\": \"esnext\",\n    \"moduleResolution\": "
  },
  {
    "path": "comparisons/empty-ok-all/go/main.go",
    "chars": 194,
    "preview": "package main\n\nimport (\n    \"net/http\"\n)\n\nfunc main() {\n    http.HandleFunc(\"/\", HelloServer)\n    http.ListenAndServe(\":8"
  },
  {
    "path": "comparisons/empty-ok-all/node/app.js",
    "chars": 325,
    "preview": "const http = require('http');\n\nconst hostname = '127.0.0.1';\nconst port = 8080;\n\nconst server = http.createServer((req, "
  },
  {
    "path": "comparisons/empty-ok-all/odin/main.odin",
    "chars": 306,
    "preview": "package empty_ok_all\n\nimport \"core:fmt\"\n\nimport http \"../../..\"\n\nmain :: proc() {\n\ts: http.Server\n\n\tfmt.println(\"Listeni"
  },
  {
    "path": "comparisons/empty-ok-all/rust/.gitignore",
    "chars": 7,
    "preview": "target\n"
  },
  {
    "path": "comparisons/empty-ok-all/rust/Cargo.toml",
    "chars": 248,
    "preview": "[package]\nname = \"rust\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-la"
  },
  {
    "path": "comparisons/empty-ok-all/rust/src/main.rs",
    "chars": 268,
    "preview": "use actix_web::{web, App, HttpServer};\n\n#[actix_web::main]\nasync fn main() -> std::io::Result<()> {\n    HttpServer::new("
  },
  {
    "path": "cookie.odin",
    "chars": 9105,
    "preview": "package http\n\nimport \"core:io\"\nimport \"core:strconv\"\nimport \"core:strings\"\nimport \"core:time\"\n\nCookie_Same_Site :: enum "
  },
  {
    "path": "docs/all.odin",
    "chars": 180,
    "preview": "/*\nThis file simply imports any packages we want in the documentation.\n*/\npackage docs\n\nimport \"../client\"\nimport http \""
  },
  {
    "path": "docs/generate.sh",
    "chars": 403,
    "preview": "#!/usr/bin/env bash\n\nset -ex\n\ncd docs\n\nrm -rf build\nmkdir build\n\nodin doc . -all-packages -doc-format\n\ncd build\n\n# This "
  },
  {
    "path": "docs/odin-doc.json",
    "chars": 551,
    "preview": "{\n    \"hide_core\": true,\n\t\"hide_base\": true,\n    \"collections\": {\n        \"odin-http\": {\n            \"name\": \"http\",\n   "
  },
  {
    "path": "examples/client/main.odin",
    "chars": 1489,
    "preview": "package client_example\n\nimport \"core:fmt\"\n\nimport \"../../client\"\n\nmain :: proc() {\n\tget()\n\tpost()\n}\n\n// basic get reques"
  },
  {
    "path": "examples/tcp_echo/main.odin",
    "chars": 1863,
    "preview": "package example_tcp_echo\n\nimport \"core:fmt\"\nimport \"core:net\"\nimport \"core:os\"\n\nimport nbio \"../../nbio/poly\"\n\nEcho_Serv"
  },
  {
    "path": "handlers.odin",
    "chars": 3086,
    "preview": "package http\n\nimport \"core:net\"\nimport \"core:strconv\"\nimport \"core:sync\"\nimport \"core:time\"\n\nHandler_Proc :: proc(handle"
  },
  {
    "path": "headers.odin",
    "chars": 4378,
    "preview": "package http\n\nimport \"core:strings\"\n\n// A case-insensitive ASCII map for storing headers.\nHeaders :: struct {\n\t_kv:     "
  },
  {
    "path": "http.odin",
    "chars": 12249,
    "preview": "package http\n\nimport \"base:runtime\"\n\nimport \"core:io\"\nimport \"core:slice\"\nimport \"core:strconv\"\nimport \"core:strings\"\nim"
  },
  {
    "path": "mimes.odin",
    "chars": 1347,
    "preview": "package http\n\nimport \"core:path/filepath\"\n\nMime_Type :: enum {\n\tPlain,\n\n\tCss,\n\tCsv,\n\tGif,\n\tHtml,\n\tIco,\n\tJpeg,\n\tJs,\n\tJson"
  },
  {
    "path": "mod.pkg",
    "chars": 201,
    "preview": "{\n\t\"version\": \"0.0.4-beta\",\n\t\"description\": \"A HTTP/1.1 client/server implementation\",\n\t\"url\": \"https://github.com/layta"
  },
  {
    "path": "odinfmt.json",
    "chars": 61,
    "preview": "{\n\t\"character_width\": 120,\n\t\"tabs\": true,\n\t\"tabs_width\": 4\n}\n"
  },
  {
    "path": "old_nbio/README.md",
    "chars": 2793,
    "preview": "# package nbio\n\nPackage nbio implements a non blocking IO abstraction layer over several platform specific APIs.\n\nThis p"
  },
  {
    "path": "old_nbio/_io_uring/os.odin",
    "chars": 20854,
    "preview": "#+build linux\npackage io_uring\n\nimport \"core:math\"\nimport \"core:os\"\nimport \"core:sync\"\nimport \"core:sys/linux\"\nimport \"c"
  },
  {
    "path": "old_nbio/_io_uring/sys.odin",
    "chars": 13153,
    "preview": "#+build linux\npackage io_uring\n\nimport \"base:intrinsics\"\n\n//odinfmt:disable\nSYS_io_uring_setup:    uintptr : 425\nSYS_io_"
  },
  {
    "path": "old_nbio/doc.odin",
    "chars": 2731,
    "preview": "/*\npackage nbio implements a non blocking IO abstraction layer over several platform specific APIs.\n\nThis package implem"
  },
  {
    "path": "old_nbio/nbio.odin",
    "chars": 26333,
    "preview": "package nbio\n\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:time\"\n\n/*\nThe main IO type that holds the platform dependa"
  },
  {
    "path": "old_nbio/nbio_darwin.odin",
    "chars": 6280,
    "preview": "package nbio\n\nimport \"core:container/queue\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:time\"\nimport \"core:sys/kqueu"
  },
  {
    "path": "old_nbio/nbio_internal_darwin.odin",
    "chars": 12558,
    "preview": "#+private\npackage nbio\n\nimport \"base:runtime\"\n\nimport \"core:container/queue\"\nimport \"core:mem\"\nimport \"core:net\"\nimport "
  },
  {
    "path": "old_nbio/nbio_internal_linux.odin",
    "chars": 16392,
    "preview": "#+private\npackage nbio\n\nimport \"base:runtime\"\n\nimport \"core:c\"\nimport \"core:container/queue\"\nimport \"core:fmt\"\nimport \"c"
  },
  {
    "path": "old_nbio/nbio_internal_windows.odin",
    "chars": 15007,
    "preview": "#+private\npackage nbio\n\nimport \"base:runtime\"\n\nimport \"core:container/queue\"\nimport \"core:log\"\nimport \"core:mem\"\nimport "
  },
  {
    "path": "old_nbio/nbio_linux.odin",
    "chars": 7440,
    "preview": "package nbio\n\nimport \"core:container/queue\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:sys/linux\"\nimport \"core:time"
  },
  {
    "path": "old_nbio/nbio_test.odin",
    "chars": 8936,
    "preview": "package nbio\n\nimport \"core:fmt\"\nimport \"core:log\"\nimport \"core:mem\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:slic"
  },
  {
    "path": "old_nbio/nbio_unix.odin",
    "chars": 1519,
    "preview": "#+build darwin, linux\n#+private\npackage nbio\n\nimport \"core:net\"\nimport \"core:os\"\n\n_open :: proc(_: ^IO, path: string, mo"
  },
  {
    "path": "old_nbio/nbio_windows.odin",
    "chars": 9522,
    "preview": "package nbio\n\nimport \"core:container/queue\"\nimport \"core:log\"\nimport \"core:net\"\nimport \"core:os\"\nimport \"core:time\"\n\nimp"
  },
  {
    "path": "old_nbio/poly/poly.odin",
    "chars": 43093,
    "preview": "// Package nbio/poly contains variants of the nbio procedures that use generic/poly data\n// so users can avoid casts and"
  },
  {
    "path": "old_nbio/pool.odin",
    "chars": 1302,
    "preview": "#+private\npackage nbio\n\nimport \"core:container/queue\"\nimport \"core:mem\"\nimport \"core:mem/virtual\"\n\n// An object pool whe"
  },
  {
    "path": "openssl/.version",
    "chars": 14,
    "preview": "openssl-3.6.2\n"
  },
  {
    "path": "openssl/openssl.odin",
    "chars": 2250,
    "preview": "package openssl\n\nimport \"core:c\"\nimport \"core:c/libc\"\n\nSHARED :: #config(OPENSSL_SHARED, false)\n\nwhen ODIN_OS == .Window"
  },
  {
    "path": "request.odin",
    "chars": 2274,
    "preview": "package http\n\nimport \"core:net\"\nimport \"core:strings\"\n\nRequest :: struct {\n\t// If in a handler, this is always there and"
  },
  {
    "path": "response.odin",
    "chars": 11935,
    "preview": "package http\n\nimport \"core:bytes\"\nimport \"core:io\"\nimport \"core:log\"\nimport \"core:mem/virtual\"\nimport \"core:nbio\"\nimport"
  },
  {
    "path": "responses.odin",
    "chars": 5899,
    "preview": "package http\n\nimport \"core:bytes\"\nimport \"core:encoding/json\"\nimport \"core:io\"\nimport \"core:log\"\nimport \"core:nbio\"\nimpo"
  },
  {
    "path": "routing.odin",
    "chars": 6833,
    "preview": "package http\n\nimport \"base:runtime\"\n\nimport \"core:log\"\nimport \"core:net\"\nimport \"core:strconv\"\nimport \"core:strings\"\nimp"
  },
  {
    "path": "scanner.odin",
    "chars": 6029,
    "preview": "#+private\npackage http\n\nimport \"core:mem/virtual\"\nimport \"base:intrinsics\"\n\nimport \"core:bufio\"\nimport \"core:nbio\"\nimpor"
  },
  {
    "path": "server.odin",
    "chars": 17762,
    "preview": "package http\n\nimport \"base:runtime\"\n\nimport \"core:bufio\"\nimport \"core:bytes\"\nimport \"core:c/libc\"\nimport \"core:fmt\"\nimpo"
  },
  {
    "path": "status.odin",
    "chars": 4618,
    "preview": "package http\n\nimport \"base:runtime\"\n\nimport \"core:fmt\"\nimport \"core:strings\"\n\nStatus :: enum {\n\tContinue                "
  }
]

// ... and 5 more files (download for full content)

About this extraction

This page contains the full source code of the laytan/odin-http GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 63 files (47.9 MB), approximately 98.8k tokens, and a symbol index with 4 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!