Repository: 1c3t3a/rust-socketio
Branch: main
Commit: a4e52873105c
Files: 85
Total size: 375.4 KB
Directory structure:
gitextract_9tvnxqqn/
├── .devcontainer/
│ ├── Dockerfile
│ ├── devcontainer.json
│ └── docker-compose.yaml
├── .github/
│ ├── dependabot.yml
│ └── workflows/
│ ├── benchmark.yml
│ ├── build.yml
│ ├── coverage.yml
│ ├── publish-dry-run.yml
│ ├── publish.yml
│ └── test.yml
├── .gitignore
├── CHANGELOG.md
├── CONTRIBUTING.md
├── Cargo.toml
├── LICENSE
├── Makefile
├── README.md
├── Roadmap.md
├── ci/
│ ├── .dockerignore
│ ├── Dockerfile
│ ├── README.md
│ ├── engine-io-polling.js
│ ├── engine-io-secure.js
│ ├── engine-io.js
│ ├── keygen.sh
│ ├── package.json
│ ├── socket-io-auth.js
│ ├── socket-io-restart-url-auth.js
│ ├── socket-io-restart.js
│ ├── socket-io.js
│ └── start_test_server.sh
├── codecov.yml
├── engineio/
│ ├── Cargo.toml
│ ├── README.md
│ ├── benches/
│ │ └── engineio.rs
│ └── src/
│ ├── asynchronous/
│ │ ├── async_socket.rs
│ │ ├── async_transports/
│ │ │ ├── mod.rs
│ │ │ ├── polling.rs
│ │ │ ├── websocket.rs
│ │ │ ├── websocket_general.rs
│ │ │ └── websocket_secure.rs
│ │ ├── callback.rs
│ │ ├── client/
│ │ │ ├── async_client.rs
│ │ │ ├── builder.rs
│ │ │ └── mod.rs
│ │ ├── generator.rs
│ │ ├── mod.rs
│ │ └── transport.rs
│ ├── callback.rs
│ ├── client/
│ │ ├── client.rs
│ │ └── mod.rs
│ ├── error.rs
│ ├── header.rs
│ ├── lib.rs
│ ├── packet.rs
│ ├── socket.rs
│ ├── transport.rs
│ └── transports/
│ ├── mod.rs
│ ├── polling.rs
│ ├── websocket.rs
│ └── websocket_secure.rs
└── socketio/
├── Cargo.toml
├── examples/
│ ├── async.rs
│ ├── callback.rs
│ ├── readme.rs
│ └── secure.rs
└── src/
├── asynchronous/
│ ├── client/
│ │ ├── ack.rs
│ │ ├── builder.rs
│ │ ├── callback.rs
│ │ ├── client.rs
│ │ └── mod.rs
│ ├── generator.rs
│ ├── mod.rs
│ └── socket.rs
├── client/
│ ├── builder.rs
│ ├── callback.rs
│ ├── client.rs
│ ├── mod.rs
│ └── raw_client.rs
├── error.rs
├── event.rs
├── lib.rs
├── packet.rs
├── payload.rs
└── socket.rs
================================================
FILE CONTENTS
================================================
================================================
FILE: .devcontainer/Dockerfile
================================================
FROM mcr.microsoft.com/vscode/devcontainers/rust:0-1
# Install socat needed for TCP proxy
RUN apt update && apt install -y socat
COPY ./ci/cert/ca.crt /usr/local/share/ca-certificates/
RUN update-ca-certificates
================================================
FILE: .devcontainer/devcontainer.json
================================================
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.187.0/containers/rust
{
"name": "Rust",
"dockerComposeFile": [
"./docker-compose.yaml"
],
"service": "rust-client",
"workspaceFolder": "/workspace/rust-socketio",
"shutdownAction": "stopCompose",
"customizations": {
"vscode": {
// Set *default* container specific settings.json values on container create.
"settings": {
"lldb.executable": "/usr/bin/lldb",
// Keep VS Code's file watcher from scanning ./target
"files.watcherExclude": {
"**/target/**": true
},
"rust-analyzer.cargo.features": [
"async"
]
/*,
// If you prefer rust-analyzer to be less noisy, consider adding these settings to your settings.json
"editor.semanticTokenColorCustomizations": {
"rules": {
"*.mutable": {
"underline": false
}
}
},
"rust-analyzer.inlayHints.parameterHints": false
*/
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"rust-lang.rust-analyzer",
"bungcip.better-toml",
"vadimcn.vscode-lldb",
"eamodio.gitlens",
"streetsidesoftware.code-spell-checker"
]
}
},
"remoteUser": "vscode",
// Start a TCP proxy from to the testing node-socket-io server so doc tests can pass.
"postAttachCommand": {
"SocketIOProxy": "socat TCP-LISTEN:4200,fork,reuseaddr TCP:node-socket-io:4200",
"EngineIOProxy": "socat TCP-LISTEN:4201,fork,reuseaddr TCP:node-engine-io:4201",
"SocketIOAuthProxy": "socat TCP-LISTEN:4204,fork,reuseaddr TCP:node-socket-io-auth:4204"
}
}
================================================
FILE: .devcontainer/docker-compose.yaml
================================================
version: '3'
services:
node-engine-io-secure:
build:
context: ../ci
command: [ "node", "/test/engine-io-secure.js" ]
ports:
- "4202:4202"
environment:
- "DEBUG=*"
node-engine-io:
build:
context: ../ci
command: [ "node", "/test/engine-io.js" ]
ports:
- "4201:4201"
environment:
- "DEBUG=*"
node-engine-io-polling:
build:
context: ../ci
command: [ "node", "/test/engine-io-polling.js" ]
ports:
- "4203:4203"
environment:
- "DEBUG=*"
node-socket-io:
build:
context: ../ci
command: [ "node", "/test/socket-io.js" ]
ports:
- "4200:4200"
environment:
- "DEBUG=*"
node-socket-io-auth:
build:
context: ../ci
command: [ "node", "/test/socket-io-auth.js" ]
ports:
- "4204:4204"
environment:
- "DEBUG=*"
node-socket-restart:
build:
context: ../ci
command: [ "node", "/test/socket-io-restart.js" ]
ports:
- "4205:4205"
environment:
- "DEBUG=*"
node-socket-restart-url-auth:
build:
context: ../ci
command: [ "node", "/test/socket-io-restart-url-auth.js" ]
ports:
- "4206:4206"
environment:
- "DEBUG=*"
rust-client:
build:
context: ..
dockerfile: ./.devcontainer/Dockerfile
command: /bin/sh -c "while sleep 10000d; do :; done"
security_opt:
- seccomp:unconfined
volumes:
- "..:/workspace/rust-socketio"
environment:
- "SOCKET_IO_SERVER=http://node-socket-io:4200"
- "SOCKET_IO_AUTH_SERVER=http://node-socket-io-auth:4204"
- "ENGINE_IO_SERVER=http://node-engine-io:4201"
- "ENGINE_IO_SECURE_SERVER=https://node-engine-io-secure:4202"
- "ENGINE_IO_SECURE_HOST=node-engine-io-secure"
- "ENGINE_IO_POLLING_SERVER=http://node-engine-io-polling:4203"
- "SOCKET_IO_RESTART_SERVER=http://node-socket-restart:4205"
- "SOCKET_IO_RESTART_URL_AUTH_SERVER=http://node-socket-restart-url-auth:4206"
================================================
FILE: .github/dependabot.yml
================================================
version: 2
updates:
- package-ecosystem: "cargo" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
interval: "monthly"
groups:
patches:
# Group cargo patch updates together to minimize PR management faff
applies-to: version-updates
update-types:
- patch
================================================
FILE: .github/workflows/benchmark.yml
================================================
on:
pull_request:
types: [opened]
issue_comment:
types: [created]
name: benchmark engine.io
jobs:
runBenchmark:
name: run benchmark
runs-on: ubuntu-latest
steps:
- uses: khan/pull-request-comment-trigger@master
id: check
with:
trigger: '/benchmark'
reaction: rocket
env:
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
- name: Checkout repository
if: steps.check.outputs.triggered == 'true'
uses: actions/checkout@v2
- name: Setup rust environment
if: steps.check.outputs.triggered == 'true'
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Setup docker
if: steps.check.outputs.triggered == 'true'
id: buildx
uses: docker/setup-buildx-action@v1
- name: Generate keys
if: steps.check.outputs.triggered == 'true'
run: make keys
- name: Build docker container
if: steps.check.outputs.triggered == 'true'
run: |
cd ci && docker build -t test_suite:latest .
docker run -d -p 4200:4200 -p 4201:4201 -p 4202:4202 -p 4203:4203 -p 4204:4204 -p 4205:4205 -p 4206:4206 test_suite:latest
- name: Extract branch name
if: steps.check.outputs.triggered == 'true'
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
- uses: actions/checkout@master
if: steps.check.outputs.triggered == 'true'
- uses: boa-dev/criterion-compare-action@v3.2.0
if: steps.check.outputs.triggered == 'true'
with:
cwd: "engineio"
branchName: ${{ steps.extract_branch.outputs.branch }}
token: ${{ secrets.GITHUB_TOKEN }}
================================================
FILE: .github/workflows/build.yml
================================================
name: Build and code style
on:
push:
branches: [main, refactoring]
pull_request:
branches: [main]
env:
CARGO_TERM_COLOR: always
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup rust environment
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Generate Cargo.lock
run: cargo generate-lockfile
- uses: actions/cache@v2
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Build
run: cargo build --verbose --all-features
- name: Linting
run: cargo clippy --verbose --all-features
- name: Check formatting
run: cargo fmt --all -- --check
================================================
FILE: .github/workflows/coverage.yml
================================================
on:
push:
branches: [main]
pull_request:
branches: [main]
name: generate coverage
jobs:
check:
name: Setup Rust project
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Setup rust environment
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Setup docker
id: buildx
uses: docker/setup-buildx-action@v1
- name: Generate keys
run: make keys
- name: Build docker container
run: |
cd ci && docker build -t test_suite:latest .
docker run -d --name test_suite -p 4200:4200 -p 4201:4201 -p 4202:4202 -p 4203:4203 -p 4204:4204 -p 4205:4205 -p 4206:4206 test_suite:latest
- uses: actions/cache@v2
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Generate code coverage
run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
- name: Upload to codecov.io
uses: codecov/codecov-action@v1.5.2
with:
token: ${{secrets.CODECOV_TOKEN}}
files: lcov.info
fail_ci_if_error: true
- name: Collect docker logs
if: always()
run: docker logs test_suite > my_logs.txt 2>&1
- name: Upload docker logs
uses: actions/upload-artifact@v4
if: always()
with:
name: docker logs
path: my_logs.txt
================================================
FILE: .github/workflows/publish-dry-run.yml
================================================
name: Publish dry run
on:
workflow_dispatch
jobs:
publish:
name: Publish dry run
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
# Publish the engine.io crate
- uses: katyo/publish-crates@v1
with:
path: './engineio'
dry-run: true
# Publish the socket.io crate
- uses: katyo/publish-crates@v1
with:
path: './socketio'
dry-run: true
================================================
FILE: .github/workflows/publish.yml
================================================
name: Publish
on:
workflow_dispatch
jobs:
publish:
name: Publish
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
# Publish the engine.io crate
- uses: katyo/publish-crates@v1
with:
path: './engineio'
registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }}
# Publish the socket.io crate
- uses: katyo/publish-crates@v1
with:
path: './socketio'
registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }}
================================================
FILE: .github/workflows/test.yml
================================================
name: Test
on:
push:
branches: [main]
pull_request:
branches: [main, refactoring]
env:
CARGO_TERM_COLOR: always
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: checkout
uses: actions/checkout@v2
- name: Setup rust environment
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Setup docker
id: buildx
uses: docker/setup-buildx-action@v1
- name: Generate keys
run: make keys
- name: Build docker container
run: |
cd ci && docker build -t test_suite:latest .
docker run -d -p 4200:4200 -p 4201:4201 -p 4202:4202 -p 4203:4203 -p 4204:4204 -p 4205:4205 -p 4206:4206 test_suite:latest
- name: Generate Cargo.lock
run: cargo generate-lockfile
- uses: actions/cache@v2
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Run testsuite
run: cargo test --verbose --features "async"
================================================
FILE: .gitignore
================================================
target
.vscode
.idea
ci/node_modules
ci/package-lock.json
ci/cert
================================================
FILE: CHANGELOG.md
================================================
# Changelog
All notable changes to this project are documented in this file.
The format is based on [Keep a Changelog], and this project adheres to
[Semantic Versioning]. The file is auto-generated using [Conventional Commits].
[keep a changelog]: https://keepachangelog.com/en/1.0.0/
[semantic versioning]: https://semver.org/spec/v2.0.0.html
[conventional commits]: https://www.conventionalcommits.org/en/v1.0.0-beta.4/
## Overview
* [unreleased](#unreleased)
* [`0.6.0`](#060) - _2024.04.16_
* [`0.5.0`](#050) - _2024.03.31_
* [`0.4.4`](#044) - _2023.11.18_
* [`0.4.3`](#043) - _2023.07.08_
* [`0.4.2`](#042) - _2023.06.25_
* [`0.4.1-alpha.2`](#041a2) - _2023.03.26_
* [`0.4.1-alpha.1`](#041a1) - _2023.01.15_
* [`0.4.1`](#041) - _2023.01.15_
* [`0.4.0`](#040) - _2022.10.20_
* [`0.3.1`](#031) - _2022.03.19_
* [`0.3.0`](#030) - _2021.12.16_
* [`0.3.0-alpha.3`](#030a3) - _2021.12.04_
* [`0.3.0-alpha.2`](#030a2) - _2021.10.14_
* [`0.3.0-alpha.1`](#030a1) - _2021.09.20_
* [`0.2.4`](#024) - _2021.05.25_
* [`0.2.3`](#023) - _2021.05.24_
* [`0.2.2`](#022) - _2021.05.13_
* [`0.2.1`](#021) - _2021.04.27_
* [`0.2.0`](#020) – _2021.03.13_
* [`0.1.1`](#011) – _2021.01.10_
* [`0.1.0`](#010) – _2021.01.05_
## _[Unreleased]_
_nothing new to show for… yet!_
## [0.6.0] - _Multi-payload fix and http 1.0_
_2024.04.16_
- Fix issues with processing multi-payload messages ([#392](https://github.com/1c3t3a/rust-socketio/pull/392)).
Credits to shenjackyuanjie@.
- Bump http to 1.0 and all dependencies that use http to a version that also uses http 1.0 ([#418](https://github.com/1c3t3a/rust-socketio/pull/418)).
Bumping those dependencies makes this a breaking change.
## [0.5.0] - _Packed with changes!_
_2024.03.31_
- Support multiple arguments to the payload through a new Payload variant called
`Text` that holds a JSON value ([#384](https://github.com/1c3t3a/rust-socketio/pull/384)).
Credits to ctrlaltf24@ and SalahaldinBilal@!
Please note: This is a breaking change: `Payload::String` is deprecated and will be removed soon.
- Async reconnections: Support for automatic reconnection in the async version of the crate!
([#400](https://github.com/1c3t3a/rust-socketio/pull/400)). Credits to rageshkrishna@.
- Add an `on_reconnect` callback that allows to change the connection configuration
([#405](https://github.com/1c3t3a/rust-socketio/pull/405)). Credits to rageshkrishna@.
- Fix bug that ignored the ping interval ([#359](https://github.com/1c3t3a/rust-socketio/pull/359)).
Credits to sirkrypt0@. This is a breaking change that removes the engine.io's stream impl.
It is however replaced by a method called `as_stream` on the engine.io socket.
- Add macro `async_callback` and `async_any_callback` for async callbacks ([#399](https://github.com/1c3t3a/rust-socketio/pull/399).
Credits to shenjackyuanjie@.
## [0.4.4] - _Bump dependencies_
_2023.11.18_
- Bump tungstenite version to v0.20.1 (avoiding security vulnerability) [#368](https://github.com/1c3t3a/rust-socketio/pull/368)
- Updating other dependencies
## [0.4.3] - _Bugfix!_
_2023.07.08_
- Fix of [#323](https://github.com/1c3t3a/rust-socketio/issues/323)
- Marking the async feature optional
## [0.4.2] - _Stabilizing the async interface!_
_2023.06.25_
- Fix "Error while parsing an incomplete packet socketio" on first heartbeat killing the async client's connection
([#311](https://github.com/1c3t3a/rust-socketio/issues/311)). Credits to [@sirkrypt0](https://github.com/sirkrypt0)
- Fix allow awaiting async callbacks ([#313](https://github.com/1c3t3a/rust-socketio/issues/313)). Credits to [@felix-gohla](https://github.com/felix-gohla)
- Various performance improvements especially in packet parsing. Credits to [@MaxOhn](https://github.com/MaxOhn)
- API for setting the reconnect URL on a connected client ([#251](https://github.com/1c3t3a/rust-socketio/issues/251)).
Credits to [@tyilo](https://github.com/tyilo)
## [0.4.1-alpha.2] - _Async socket.io fixes_
_2023.03.26_
- Add `on_any` method for async `ClientBuilder`. This adds the capability to
react to all incoming events (custom and otherwise).
- Add `auth` option to async `ClientBuilder`. This allows for specifying JSON
data that is sent with the first open packet, which is commonly used for
authentication.
- Bump dependencies and remove calls to deprecated library functions.
## [0.4.1-alpha.1] - _Async socket.io version_
_2023.01.05_
- Add an async socket.io interface under the `async` feature flag, relevant PR: [#180](https://github.com/1c3t3a/rust-socketio/pull/180).
- See example code under `socketio/examples/async.rs` and in the `async` section of the README.
## [0.4.1] - _Minor enhancements_
_2023.01.05_
- As of [#264](https://github.com/1c3t3a/rust-socketio/pull/264), the callbacks
are now allowed to be `?Sync`.
- As of [#265](https://github.com/1c3t3a/rust-socketio/pull/265), the `Payload`
type now implements `AsRef`.
## [0.4.0] - _Bugfixes and Reconnection feature_
_2022.10.20_
### Changes
- Fix [#214](https://github.com/1c3t3a/rust-socketio/issues/214).
- Fix [#215](https://github.com/1c3t3a/rust-socketio/issues/215).
- Fix [#219](https://github.com/1c3t3a/rust-socketio/issues/219).
- Fix [#221](https://github.com/1c3t3a/rust-socketio/issues/221).
- Fix [#222](https://github.com/1c3t3a/rust-socketio/issues/222).
- BREAKING: The default Client returned by the builder will automatically reconnect to the server unless stopped manually.
The new `ReconnectClient` encapsulates this behaviour.
Special thanks to [@SSebo](https://github.com/SSebo) for his major contribution to this release.
## [0.3.1] - _Bugfix_
_2022.03.19_
### Changes
- Fixes regarding [#166](https://github.com/1c3t3a/rust-socketio/issues/166).
## [0.3.0] - _Stabilize alpha version_
_2021.12.16_
### Changes
- Stabilized alpha features.
- Fixes regarding [#133](https://github.com/1c3t3a/rust-socketio/issues/133).
## [0.3.0-alpha.3] - _Bugfixes_
_2021.12.04_
### Changes
- fix a bug that resulted in a blocking `emit` method (see [#133](https://github.com/1c3t3a/rust-socketio/issues/133)).
- Bump dependencies.
## [0.3.0-alpha.2] - _Further refactoring_
_2021.10.14_
### Changes
* Rename `Socket` to `Client` and `SocketBuilder` to `ClientBuilder`
* Removed headermap from pub use, internal type only
* Deprecations:
* crate::payload (use crate::Payload instead)
* crate::error (use crate::Error instead)
* crate::event (use crate::Event instead)
## [0.3.0-alpha.1] - _Refactoring_
_2021.09.20_
### Changes
* Refactored Errors
* Renamed EmptyPacket to EmptyPacket()
* Renamed IncompletePacket to IncompletePacket()
* Renamed InvalidPacket to InvalidPacket()
* Renamed Utf8Error to InvalidUtf8()
* Renamed Base64Error to InvalidBase64
* Renamed InvalidUrl to InvalidUrlScheme
* Renamed ReqwestError to IncompleteResponseFromReqwest
* Renamed HttpError to IncompleteHttp
* Renamed HandshakeError to InvalidHandshake
* Renamed ~ActionBeforeOpen to IllegalActionBeforeOpen()~
* Renamed DidNotReceiveProperAck to MissingAck
* Renamed PoisonedLockError to InvalidPoisonedLock
* Renamed FromWebsocketError to IncompleteResponseFromWebsocket
* Renamed FromWebsocketParseError to InvalidWebsocketURL
* Renamed FromIoError to IncompleteIo
* New error type InvalidUrl(UrlParseError)
* New error type InvalidInteger(ParseIntError)
* New error type IncompleteResponseFromEngineIo(rust_engineio::Error)
* New error type InvalidAttachmentPacketType(u8)
* Removed EmptyPacket
* Refactored Packet
* Renamed encode to From<&Packet>
* Renamed decode to TryFrom<&Bytes>
* Renamed attachments to attachments_count
* New struct member attachments: Option>
* Refactor PacketId
* Renamed u8_to_packet_id to TryFrom for PacketId
* Refactored SocketBuilder
* Renamed set_namespace to namespace
* Renamed set_tls_config to tls_config
* Renamed set_opening_header to opening_header
* namespace returns Self rather than Result
* opening_header accepts a Into rather than HeaderValue
* Allows for pure websocket connections
* Refactor EngineIO module
## [0.2.4] - _Bugfixes_
_2021.05.25_
### Changes
* Fixed a bug that prevented the client from receiving data for a message event issued on the server.
## [0.2.3] - _Disconnect methods on the Socket struct_
_2021.05.24_
### Changes
* Added a `disconnect` method to the `Socket` struct as requested in [#43](https://github.com/1c3t3a/rust-socketio/issues/43).
## [0.2.2] - _Safe websockets and custom headers_
_2021.05.13_
### Changes
* Added websocket communication over TLS when either `wss`, or `https` are specified in the URL.
* Added the ability to configure the TLS connection by providing an own `TLSConnector`.
* Added the ability to set custom headers as requested in [#35](https://github.com/1c3t3a/rust-socketio/issues/35).
## [0.2.1] - _Bugfixes_
_2021.04.27_
### Changes
* Corrected memory ordering issues which might have become an issue on certain platforms.
* Added this CHANGELOG to keep track of all changes.
* Small stylistic changes to the codebase in general.
## [0.2.0] - _Fully implemented the socket.io protocol 🎉_
_2021.03.13_
### Changes
* Moved from async rust to sync rust.
* Implemented the missing protocol features.
* Websocket as a transport layer.
* Binary payload.
* Added a `SocketBuilder` class to easily configure a connected client.
* Added a new `Payload` type to manage the binary and string payload.
## [0.1.1] - _Update to tokio 1.0_
_2021.01.10_
### Changes
* Bumped `tokio` to version `1.0.*`, and therefore reqwest to `0.11.*`.
* Removed all `unsafe` code.
## [0.1.0] - _First release of rust-socketio 🎉_
_2021.01.05_
* First version of the library written in async rust. The features included:
* connecting to a server.
* register callbacks for the following event types:
* open, close, error, message
* custom events like "foo", "on_payment", etc.
* send json-data to the server (recommended to use serde_json as it provides safe handling of json data).
* send json-data to the server and receive an ack with a possible message.
================================================
FILE: CONTRIBUTING.md
================================================
# Introduction
Contributions to this project are welcome!
This project is still being developed, our goal is to have a well-designed
thought-out project, so when you make a contribution, please think in those
terms. That is:
- For code:
- Is the contribution in the scope of this crate?
- Is it well documented?
- Is it well tested?
- For documentation:
- Is it clear and well-written?
- Can others understand it easily?
- For bugs:
- Does it test functionality of this crate?
- Do you have a minimal crate that causes the issue that we can use and test?
- For feature requests:
- Is the request within the scope of this crate?
- Is the request clearly explained?
## Licensing and other property rights.
All contributions that are made to this project are only accepted under the
terms in the [LICENSE](LICENSE) file. That is, if you contribute to this
project, you are certifying that you have all the necessary rights to make the
contribution under the terms of the [LICENSE](LICENSE) file, and you are in fact
licensing them to this project and anyone that uses this project under the terms
in the [LICENSE](LICENSE) file.
## Misc
- We are looking at adopting the [conventional commits 1.0](https://www.conventionalcommits.org/en/v1.0.0/) standard.
- This would make it easier for us to use tools like [jilu](https://crates.io/crates/jilu) to create change logs.
- Read [keep a changelog](https://keepachangelog.com/en/1.0.0/) for more information.
## Git hooks
> Git hooks are scripts that Git executes before or after events such as: commit, push, and receive. Git hooks are a built-in feature - no need to download anything. Git hooks are run locally.
- [githooks.com](https://githooks.com/)
These example hooks enforce some of these contributing guidelines so you don't need to remember them.
### pre-commit
Put the contents below in ./.git/hooks/pre-commit
```bash
#!/bin/bash
set -e
set -o pipefail
# Clippy recommendations
cargo clippy --verbose
# Check formatting
cargo fmt --all -- --check || {
cargo fmt
echo "Formatted some files make sure to check them in."
exit 1
}
# Make sure our build passes
cargo build --verbose
```
### commit-msg
Put the contents below in ./.git/hooks/commit-msg
```bash
#!/bin/bash
# https://dev.to/craicoverflow/enforcing-conventional-commits-using-git-hooks-1o5p
regexp="^(revert: )?(fix|feat|docs|ci|refactor|style|test)(\(.*?\))?(\!)?: .+$"
msg="$(head -1 $1)"
if [[ ! $msg =~ $regexp ]]
then
echo -e "INVALID COMMIT MESSAGE"
echo -e "------------------------"
echo -e "Valid types: fix, feat, docs, ci, style, test, refactor"
echo -e "Such as: 'feat: add new feature'"
echo -e "See https://www.conventionalcommits.org/en/v1.0.0/ for details"
echo
# exit with an error
exit 1
fi
while read line
do
if [[ $(echo "$line" | wc -c) -gt 51 ]]
then
echo "Line '$line' is longer than 50 characters."
echo "Consider splitting into these two lines"
echo "$line" | head -c 50
echo
echo "$line" | tail -c +51
echo
exit 1
fi
done < $1
```
================================================
FILE: Cargo.toml
================================================
[workspace]
members = ["engineio", "socketio"]
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2021 Bastian Kersting
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: Makefile
================================================
.PHONY: build test-fast run-test-servers test-all clippy format checks pipeline
build:
@cargo build --verbose --all-features
keys:
@./ci/keygen.sh node-engine-io-secure 127.0.0.1
test-fast: keys
@cargo test --verbose --package rust_socketio --lib -- engineio::packet && cargo test --verbose --package rust_socketio --lib -- socketio::packet
run-test-servers:
cd ci && docker build -t test_suite:latest . && cd ..
docker run -d -p 4200:4200 -p 4201:4201 -p 4202:4202 -p 4203:4203 -p 4204:4204 -p 4205:4205 -p 4206:4206 --name socketio_test test_suite:latest
test-all: keys run-test-servers
@cargo test --verbose --all-features
docker stop socketio_test
clippy:
@cargo clippy --verbose --all-features
format:
@cargo fmt --all -- --check
checks: build test-fast clippy format
@echo "### Don't forget to add untracked files! ###"
@git status
@echo "### Awesome work! 😍 ###"""
pipeline: build test-all clippy format
@echo "### Don't forget to add untracked files! ###"
@git status
@echo "### Awesome work! 😍 ###"""
================================================
FILE: README.md
================================================
[](https://crates.io/crates/rust_socketio)
[](https://docs.rs/rust_socketio)
[](https://github.com/1c3t3a/rust-socketio/actions/workflows/build.yml)
[](https://github.com/1c3t3a/rust-socketio/actions/workflows/test.yml)
[](https://codecov.io/gh/1c3t3a/rust-socketio)
# Rust-socketio-client
An implementation of a socket.io client written in the rust programming language. This implementation currently supports revision 5 of the socket.io protocol and therefore revision 4 of the engine.io protocol. If you have any connection issues with this client, make sure the server uses at least revision 4 of the engine.io protocol.
Information on the [`async`](#async) version can be found below.
## Example usage
Add the following to your `Cargo.toml` file:
```toml
rust_socketio = "*"
```
Then you're able to run the following example code:
``` rust
use rust_socketio::{ClientBuilder, Payload, RawClient};
use serde_json::json;
use std::time::Duration;
// define a callback which is called when a payload is received
// this callback gets the payload as well as an instance of the
// socket to communicate with the server
let callback = |payload: Payload, socket: RawClient| {
match payload {
Payload::String(str) => println!("Received: {}", str),
Payload::Binary(bin_data) => println!("Received bytes: {:#?}", bin_data),
}
socket.emit("test", json!({"got ack": true})).expect("Server unreachable")
};
// get a socket that is connected to the admin namespace
let socket = ClientBuilder::new("http://localhost:4200")
.namespace("/admin")
.on("test", callback)
.on("error", |err, _| eprintln!("Error: {:#?}", err))
.connect()
.expect("Connection failed");
// emit to the "foo" event
let json_payload = json!({"token": 123});
socket.emit("foo", json_payload).expect("Server unreachable");
// define a callback, that's executed when the ack got acked
let ack_callback = |message: Payload, _| {
println!("Yehaa! My ack got acked?");
println!("Ack data: {:#?}", message);
};
let json_payload = json!({"myAckData": 123});
// emit with an ack
socket
.emit_with_ack("test", json_payload, Duration::from_secs(2), ack_callback)
.expect("Server unreachable");
socket.disconnect().expect("Disconnect failed")
```
The main entry point for using this crate is the `ClientBuilder` which provides a way to easily configure a socket in the needed way. When the `connect` method is called on the builder, it returns a connected client which then could be used to emit messages to certain events. One client can only be connected to one namespace. If you need to listen to the messages in different namespaces you need to allocate multiple sockets.
## Documentation
Documentation of this crate can be found up on [docs.rs](https://docs.rs/rust_socketio).
## Current features
This implementation now supports all of the features of the socket.io protocol mentioned [here](https://github.com/socketio/socket.io-protocol).
It generally tries to make use of websockets as often as possible. This means most times
only the opening request uses http and as soon as the server mentions that it is able to upgrade to
websockets, an upgrade is performed. But if this upgrade is not successful or the server
does not mention an upgrade possibility, http-long polling is used (as specified in the protocol specs).
Here's an overview of possible use-cases:
- connecting to a server.
- register callbacks for the following event types:
- open
- close
- error
- message
- custom events like "foo", "on_payment", etc.
- send JSON data to the server (via `serde_json` which provides safe
handling).
- send JSON data to the server and receive an `ack`.
- send and handle Binary data.
## Async version
This library provides an ability for being executed in an asynchronous context using `tokio` as
the execution runtime.
Please note that the current async implementation is still experimental; the interface may be subject to
change at any time.
The async `Client` and `ClientBuilder` support a similar interface to the sync version and live
in the `asynchronous` module. In order to enable the support, you need to enable the `async`
feature flag:
```toml
rust_socketio = { version = "*", features = ["async"] }
```
The following code shows the example above in async fashion:
``` rust
use futures_util::FutureExt;
use rust_socketio::{
asynchronous::{Client, ClientBuilder},
Payload,
};
use serde_json::json;
use std::time::Duration;
#[tokio::main]
async fn main() {
// define a callback which is called when a payload is received
// this callback gets the payload as well as an instance of the
// socket to communicate with the server
let callback = |payload: Payload, socket: Client| {
async move {
match payload {
Payload::String(str) => println!("Received: {}", str),
Payload::Binary(bin_data) => println!("Received bytes: {:#?}", bin_data),
}
socket
.emit("test", json!({"got ack": true}))
.await
.expect("Server unreachable");
}
.boxed()
};
// get a socket that is connected to the admin namespace
let socket = ClientBuilder::new("http://localhost:4200/")
.namespace("/admin")
.on("test", callback)
.on("error", |err, _| {
async move { eprintln!("Error: {:#?}", err) }.boxed()
})
.connect()
.await
.expect("Connection failed");
// emit to the "foo" event
let json_payload = json!({"token": 123});
socket
.emit("foo", json_payload)
.await
.expect("Server unreachable");
// define a callback, that's executed when the ack got acked
let ack_callback = |message: Payload, _: Client| {
async move {
println!("Yehaa! My ack got acked?");
println!("Ack data: {:#?}", message);
}
.boxed()
};
let json_payload = json!({"myAckData": 123});
// emit with an ack
socket
.emit_with_ack("test", json_payload, Duration::from_secs(2), ack_callback)
.await
.expect("Server unreachable");
socket.disconnect().await.expect("Disconnect failed");
}
```
## Content of this repository
This repository contains a rust implementation of the socket.io protocol as well as the underlying engine.io protocol.
The details about the engine.io protocol can be found here:
* <https://github.com/socketio/engine.io-protocol>
The specification for the socket.io protocol here:
* <https://github.com/socketio/socket.io-protocol>
Looking at the component chart, the following parts are implemented (Source: https://socket.io/images/dependencies.jpg):
## Licence
MIT
================================================
FILE: Roadmap.md
================================================
# Roadmap
- 0.2.5 deprecation begins
- 0.3.0 refactor with breaking changes (Conform to api recommendations wherever reasonable)
- 0.4.0 Release with basic server
- 0.5.0 Release with async
- 0.6.0 Release with redis
- ????? Rooms
- ????? Refactor Engine.IO to separate crate
- 1.0.0 Stable?
================================================
FILE: ci/.dockerignore
================================================
node_modules
================================================
FILE: ci/Dockerfile
================================================
FROM node:12.18.1
WORKDIR /test
# Copy all test server scripts and the generated certificates into the image.
# (The second COPY is redundant — `COPY . ./` already includes the script.)
COPY . ./
COPY start_test_server.sh ./
# Trust the self-signed test CA so the secure engine.io server's cert validates.
RUN cp cert/ca.crt /usr/local/share/ca-certificates/ && update-ca-certificates
RUN npm install
RUN chmod u+x start_test_server.sh
# Launches every node test server and keeps the container alive (watchdog loop).
CMD ./start_test_server.sh
================================================
FILE: ci/README.md
================================================
# How the CI pipelines are set up
This document explains how the CI pipeline is set up. Generally, the pipeline runs on every push to the `main` branch or on a pull request.
There are three different pipelines:
* Build and Codestyle: Tries to build the application and checks the code quality and formatting through `cargo clippy` and `cargo fmt`.
If you'd like to trigger this manually, run `make checks` in the project root.
* Build and test: Builds the code and kicks off a docker container containing the socket.io and engine.io servers. Then the tests run against the servers. The servers code should not be changed as the clients' tests assume certain events to fire e.g. an ack gets acked or a certain namespace exists. Two servers are started:
* An engine.io server with some callbacks that send normal string data.
* A _safe_ engine.io server with some callbacks that send normal string data. Generate keys for TLS with `./ci/keygen.sh localhost 127.0.0.1`. This server is used for tests using `wss://` and `https://`.
* A socket.io server which sends string and binary data, handles acks, etc.
* Generate coverage: This action acts like the `Build and test` action, but generates a coverage report as well. Afterward the coverage report is uploaded to codecov.io.
This action also collects the docker server logs and uploads them as an artifact.
# How to run the tests locally
To run the tests locally, simply use `cargo test`, or the various Make targets in the `Makefile`. For example
`make pipeline` runs all tests, but also `clippy` and `rustfmt`.
As some tests depend on a running engine.io/socket.io server, you will need to provide those locally, too. See the
sections below for multiple options to do this.
You will also need to create a self-signed certificate for the secure engine.io server. This can be done with the
helper script `/ci/keygen.sh`. Please make sure to also import the generated ca and server certificates into your
system (i.e. Keychain Access for MacOS, /usr/local/share/ca-certificates/ for linux) and make sure they are "always
trusted".
## Running server processes manually via nodejs
If you'd like to run the full test suite locally, you need to run the five server instances as well (see files in `ci/`
folder). You could do this manually by running them all with node:
```
node engine-io.js
node engine-io-polling.js
node engine-io-secure.js
node socket-io.js
node socket-io-auth.js
node socket-io-restart.js
node socket-io-restart-url-auth.js
```
If you'd like to see debug log as well, export this environment variable beforehand:
```
export DEBUG=*
```
You will need to have the two node packages `socket.io` and `engine.io` installed, if this is not the case, fetch them
via:
```
npm install socket.io engine.io
```
## Running server processes in a Docker container
As running all the node scripts manually is pretty tedious, you can also use a prepared docker container, which can be
built with the Dockerfile located in the `ci/` folder:
```
docker build -t test_suite:latest ci
```
Then you can run the container and forward all the needed ports with the following command:
```
docker run -d --name test_suite -p 4200:4200 -p 4201:4201 -p 4202:4202 -p 4203:4203 -p 4204:4204 -p 4205:4205 -p 4206:4206 test_suite:latest
```
The docker container runs a shell script that starts the two servers in the background and checks if the processes are
still alive.
## Using the Visual Studio Code devcontainer
If you are using Visual Studio Code, the easiest method to get up and running would be to simply use the devcontainer
prepared in the `.devcontainer/` directory. This will also launch the needed server processes and set up networking etc.
Please refer to the vscode [documentation](https://code.visualstudio.com/docs/remote/containers) for more information
on how to use devcontainers.
# Polling vs. Websockets
The underlying engine.io protocol provides two mechanisms for transporting: polling and websockets. In order to test both in the pipeline, the two servers are configured differently. The socket.io test suite always upgrades to websockets as fast as possible while one of the engine.io suites just uses long-polling, the other one uses websockets but is reachable via `https://` and `wss://`. This assures that both the websocket connection code and the long-polling code gets tested (as seen on codecov.io). Keep that in mind while expanding the tests.
================================================
FILE: ci/engine-io-polling.js
================================================
/**
 * This is an example server, used to test the current code.
 *
 * A long-polling-only engine.io test server on port 4203: websocket
 * upgrades are disabled so the client's polling transport code path is
 * exercised. Replies "Roger Roger" to "respond", closes on "close",
 * and reports errors back to the client so its tests fail visibly.
 */
const engine = require('engine.io');
const http = require('http').createServer().listen(4203);
// the engine.io client runs on port 4203
const server = engine.attach(http, {
    // Stay on long-polling; never upgrade the transport.
    allowUpgrades: false,
    transports: ["polling"]
});
console.log("Started")
server.on('connection', socket => {
    console.log("Connected");
    socket.on('message', message => {
        if (message !== undefined) {
            console.log(message.toString());
            if (message == "respond") {
                socket.send("Roger Roger");
            } else if (message == "close") {
                socket.close();
            }
        } else {
            console.log("empty message received")
        }
    });
    socket.on('heartbeat', () => {
        console.log("heartbeat");
    });
    socket.on('error', message => {
        // Notify the client if there is an error so its tests will fail
        socket.send("ERROR: Received error")
        console.log(message.toString());
    });
    socket.on('close', () => {
        console.log("Close");
        socket.close();
    });
    // Greet every new connection immediately.
    socket.send('hello client');
});
================================================
FILE: ci/engine-io-secure.js
================================================
// TLS-secured engine.io test server on port 4202, used by the `wss://` and
// `https://` tests. The key/cert/CA files are generated by ci/keygen.sh.
// Behaves like the plain engine-io.js server: replies "Roger Roger" to
// "respond", closes on "close", and forwards errors back to the client.
const fs = require('fs');
const https = require('https');
const eio = require('engine.io');
// Self-signed server certificate plus the CA that signed it.
const serverOpts = {
    key: fs.readFileSync("cert/server.key"),
    cert: fs.readFileSync("cert/server.crt"),
    ca: fs.readFileSync("cert/ca.crt"),
};
const http = https.createServer(serverOpts);
const server = eio.attach(http);
console.log("Started")
http.listen(4202, () => {
    server.on('connection', socket => {
        console.log("Connected");
        socket.on('message', message => {
            if (message !== undefined) {
                console.log(message.toString());
                if (message == "respond") {
                    socket.send("Roger Roger");
                } else if (message == "close") {
                    socket.close();
                }
            } else {
                console.log("empty message received")
            }
        });
        socket.on('heartbeat', () => {
            console.log("heartbeat");
        });
        socket.on('error', message => {
            // Notify the client if there is an error so its tests will fail
            socket.send("ERROR: Received error")
            console.log(message.toString());
        });
        socket.on('close', () => {
            console.log("Close");
            socket.close();
        });
        // Greet every new connection immediately.
        socket.send('hello client');
    });
});
================================================
FILE: ci/engine-io.js
================================================
/**
 * Plain engine.io test server on port 4201 (websocket upgrade allowed).
 * Replies "Roger Roger" to "respond", closes the socket on "close",
 * and forwards any error back to the client so its tests fail visibly.
 */
const engineIo = require('engine.io');
const httpServer = require('http').createServer().listen(4201);
// the engine.io client runs on port 4201
const ioServer = engineIo.attach(httpServer);
console.log("Started")
ioServer.on('connection', function (socket) {
    console.log("Connected");
    socket.on('message', function (message) {
        if (message === undefined) {
            console.log("empty message received")
            return;
        }
        console.log(message.toString());
        if (message == "respond") {
            socket.send("Roger Roger");
        } else if (message == "close") {
            socket.close();
        }
    });
    socket.on('heartbeat', function () {
        console.log("heartbeat");
    });
    socket.on('error', function (message) {
        // Notify the client if there is an error so its tests will fail
        socket.send("ERROR: Received error")
        console.log(message.toString());
    });
    socket.on('close', function () {
        console.log("Close");
        socket.close();
    });
    // Greet every new connection immediately.
    socket.send('hello client');
});
================================================
FILE: ci/keygen.sh
================================================
#!/bin/sh
# Generates a self-signed CA plus a server key/certificate pair for the TLS
# test servers. All artifacts are written to ./cert next to this script.
# Existing files are never regenerated, so the script is safe to re-run.
#
# Usage: keygen.sh [DOMAIN] [IP]
# Quote the command substitution so paths containing spaces work.
cd "$(dirname "$0")"
if [ "$1" = "" ] || [ "$2" = "" ]
then
    echo "Usage: keygen.sh [DOMAIN] [IP]"
    exit 1
fi
DOMAIN="$1"
IP="$2"
# Common name of the generated CA; override via the CA_NAME env variable.
CA_NAME=${CA_NAME:-"rust-socketio-dev"}
mkdir cert || true
cd cert
# Credit https://scriptcrunch.com/create-ca-tls-ssl-certificates-keys/
if [ ! -f ca.key ]
then
    echo "Generating CA key"
    openssl genrsa -out ca.key 4096
fi
if [ ! -f "ca.crt" ]
then
    echo "Generating CA cert"
    openssl req -x509 -new -nodes -key ca.key -subj "/CN=${CA_NAME}/C=??/L=Varius" -out ca.crt
fi
if [ ! -f "server.key" ]
then
    echo "Generating server key"
    openssl genrsa -out server.key 4096
fi
# Write the CSR config once, with SANs for the domain, localhost and the IP.
if [ ! -f "csr.conf" ]
then
echo """
[ req ]
default_bits = 4096
prompt = no
default_md = sha256
req_extensions = req_ext
distinguished_name = dn
[ dn ]
C = ??
ST = Varius
L = Varius
O = ${DOMAIN}
OU = ${DOMAIN}
CN = ${DOMAIN}
[ req_ext ]
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = ${DOMAIN}
DNS.2 = localhost
IP.1 = ${IP}
""" > csr.conf
fi
if [ ! -f "server.csr" ]
then
    echo "Generating server signing request"
    openssl req -new -key server.key -out server.csr -config csr.conf
fi
# Sign the server CSR with the CA, carrying over the SAN extensions.
if [ ! -f "server.crt" ]
then
    echo "Generating signed server certificate"
    openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -extfile csr.conf
fi
================================================
FILE: ci/package.json
================================================
{
"name": "rust-socketio-test",
"version": "1.0.0",
"description": "A test environment for a socketio client",
"author": "Bastian Kersting",
"license": "MIT",
"dependencies": {
"engine.io": "6.4.2",
"socket.io": "4.0.0"
}
}
================================================
FILE: ci/socket-io-auth.js
================================================
// Test server that checks the `auth` payload sent during the socket.io
// handshake. It answers with an `auth` event carrying "success" or
// "failed". Registered on the root and /admin namespaces, port 4204.
const httpServer = require('http').createServer();
const io = require('socket.io')(httpServer);
console.log('Started');
function onConnection(client) {
    console.log('Connected!');
    // Exactly the password "123" is accepted.
    const outcome = client.handshake.auth.password === '123' ? 'success' : 'failed';
    client.emit('auth', outcome)
}
io.on('connection', onConnection);
io.of('/admin').on('connection', onConnection);
// the socket.io client runs on port 4204
httpServer.listen(4204);
================================================
FILE: ci/socket-io-restart-url-auth.js
================================================
// Test server on port 4206 that authenticates clients via a `timestamp`
// query parameter and can restart itself on demand, so the client's
// reconnect-with-refreshed-auth logic can be exercised.
let createServer = require("http").createServer;
let server = createServer();
const io = require("socket.io")(server);
const port = 4206;
// Delay before the server comes back up after a requested restart (ms).
const timeout = 200;
// Maximum accepted difference between the client timestamp and "now" (ms).
const TIMESTAMP_SLACK_ALLOWED = 1000;
// Returns true when `timestampStr` parses to an integer timestamp within
// TIMESTAMP_SLACK_ALLOWED milliseconds of the current time.
function isValidTimestamp(timestampStr) {
    if (timestampStr === undefined) return false;
    const timestamp = parseInt(timestampStr);
    if (isNaN(timestamp)) return false;
    const diff = Date.now() - timestamp;
    return Math.abs(diff) <= TIMESTAMP_SLACK_ALLOWED;
}
console.log("Started");
var callback = (client) => {
    const timestamp = client.request._query.timestamp;
    console.log("Connected, timestamp:", timestamp);
    // Reject clients with a missing, malformed or stale timestamp.
    if (!isValidTimestamp(timestamp)) {
        console.log("Invalid timestamp!");
        client.disconnect();
        return;
    }
    client.emit("message", "test");
    client.on("restart_server", () => {
        console.log("will restart in ", timeout, "ms");
        io.close();
        // Recreate the HTTP server on the same port after `timeout` ms and
        // re-attach the socket.io instance to it.
        setTimeout(() => {
            server = createServer();
            server.listen(port);
            io.attach(server);
            console.log("do restart");
        }, timeout);
    });
};
io.on("connection", callback);
server.listen(port);
================================================
FILE: ci/socket-io-restart.js
================================================
// Test server on port 4205 used to exercise client reconnection: on the
// "restart_server" event it shuts down and restarts after `timeout` ms.
// The greeting message can be overridden via a `message_back` header.
const createServer = require("http").createServer;
let server = createServer();
const io = require("socket.io")(server);
const port = 4205;
const timeout = 2000;
console.log("Started");
function handleConnection(client) {
    const requestHeaders = client.request.headers;
    console.log("headers", requestHeaders);
    // Header-controlled greeting, defaulting to "test".
    const message = requestHeaders.message_back || "test";
    console.log("Connected!");
    client.emit("message", message);
    client.on("restart_server", () => {
        console.log("will restart in ", timeout, "ms");
        io.close();
        // Bring the server back up on the same port after the delay.
        setTimeout(() => {
            server = createServer();
            server.listen(port);
            io.attach(server);
            console.log("do restart");
        }, timeout);
    });
}
io.on("connection", handleConnection);
server.listen(port);
================================================
FILE: ci/socket-io.js
================================================
// Main socket.io test server on port 4200. Echoes events back with
// "-received" suffixes, answers acks (string and binary), and pushes a
// fixed set of events every client test asserts on. The same handler is
// registered on the root and the /admin namespace.
const server = require('http').createServer();
const io = require('socket.io')(server);
console.log('Started');
var callback = client => {
    console.log('Connected!');
    client.on('test', data => {
        // Send a message back to the server to confirm the message was received
        client.emit('test-received', data);
        console.log(['test', data]);
    });
    client.on('message', data => {
        client.emit('message-received', data);
        console.log(['message', data]);
    });
    // Second 'test' handler (both run): answers the ack when one is attached.
    client.on('test', function (arg, ack) {
        console.log('Ack received')
        if (ack) {
            ack('woot');
        }
    });
    client.on('binary', data => {
        var bufView = new Uint8Array(data);
        console.log(['binary', 'Yehaa binary payload!']);
        for (elem in bufView) {
            console.log(['binary', elem]);
        }
        client.emit('binary-received', data);
        console.log(['binary', data]);
    });
    // Second 'binary' handler (both run): acks with a binary payload.
    client.on('binary', function (arg, ack) {
        console.log(['binary', 'Ack received, answer with binary'])
        if (ack) {
            ack(Buffer.from([1, 2, 3]));
        }
    });
    // This event allows the test framework to arbitrarily close the underlying connection
    client.on('close_transport', data => {
        console.log(['close_transport', 'Request to close transport received'])
        // Close underlying websocket connection
        client.client.conn.close();
    })
    // Events pushed to every client right after connecting; the test suite
    // asserts on each of them.
    client.emit('Hello from the message event!');
    client.emit('test', 'Hello from the test event!');
    client.emit(Buffer.from([4, 5, 6]));
    client.emit('test', Buffer.from([1, 2, 3]));
    client.emit('This is the first argument', 'This is the second argument', {
        argCount: 3
    });
    client.emit('on_abc_event', '', {
        abc: 0,
        some_other: 'value',
    });
};
io.on('connection', callback);
io.of('/admin').on('connection', callback);
// the socket.io server listens on port 4200
server.listen(4200);
================================================
FILE: ci/start_test_server.sh
================================================
#!/bin/sh
# Starts every node test server required by the rust-socketio test suite
# and then loops forever, exiting as soon as one of the watched servers
# disappears (which makes the CI container fail fast).

# start_node SCRIPT LABEL
# Launches SCRIPT in the background with full debug logging and reports
# the outcome. NOTE(review): `$?` after `&` reflects whether the shell
# could spawn the background job, not whether the node process itself
# started successfully — kept as-is to preserve the original behavior.
start_node() {
    DEBUG=* node "$1" &
    status=$?
    if [ $status -ne 0 ]; then
        echo "Failed to start $2: $status"
        exit $status
    fi
    echo "Successfully started $2 instance"
}

echo "Starting test environment"
start_node engine-io.js "engine.io"
start_node engine-io-polling.js "polling engine.io"
start_node socket-io.js "socket.io"
start_node socket-io-auth.js "socket.io auth"
start_node socket-io-restart.js "socket.io restart"
start_node socket-io-restart-url-auth.js "socket.io restart url auth"
start_node engine-io-secure.js "secure engine.io"

# Watchdog: check once a minute that the server processes are still alive.
while sleep 60; do
    ps aux | grep socket | grep -q -v grep
    PROCESS_1_STATUS=$?
    ps aux | grep engine-io.js | grep -q -v grep
    PROCESS_2_STATUS=$?
    ps aux | grep engine-io-secure.js | grep -q -v grep
    PROCESS_3_STATUS=$?
    # If the greps above find anything, they exit with 0 status
    # If they are not all 0, then something is wrong
    if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 -o $PROCESS_3_STATUS -ne 0 ]; then
        echo "One of the processes has already exited."
        exit 1
    fi
done
================================================
FILE: codecov.yml
================================================
# Codecov configuration for the rust-socketio repository.
coverage:
  range: 50..90 # coverage lower than 50 is red, higher than 90 green, between color code
  status:
    project: # settings affecting project coverage
      default:
        target: auto # auto % coverage target
        threshold: 5% # allow for 5% reduction of coverage without failing
    # do not run coverage on patch nor changes
    patch: false
================================================
FILE: engineio/Cargo.toml
================================================
[package]
name = "rust_engineio"
version = "0.6.0"
authors = ["Bastian Kersting "]
edition = "2021"
description = "An implementation of a engineio client written in rust."
readme = "README.md"
repository = "https://github.com/1c3t3a/rust-socketio"
keywords = ["engineio", "network", "protocol", "client"]
categories = ["network-programming", "web-programming", "web-programming::websocket"]
license = "MIT"

# Build the docs.rs documentation with every feature enabled.
[package.metadata.docs.rs]
all-features = true

[dependencies]
base64 = "0.22.1"
bytes = "1"
reqwest = { version = "0.12.4", features = ["blocking", "native-tls", "stream"] }
adler32 = "1.2.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
http = "1.1.0"
tokio-tungstenite = { version = "0.21.0", features = ["native-tls"] }
tungstenite = "0.21.0"
tokio = "1.40.0"
futures-util = { version = "0.3", default-features = false, features = ["sink"] }
async-trait = "0.1.83"
async-stream = "0.3.6"
thiserror = "1.0"
native-tls = "0.2.12"
url = "2.5.4"

[dev-dependencies]
criterion = { version = "0.5.1", features = ["async_tokio"] }
lazy_static = "1.4.0"

[dev-dependencies.tokio]
version = "1.40.0"
# we need the `#[tokio::test]` macro
features = ["macros"]

[[bench]]
name = "engineio"
harness = false

# needs to be present in order to support the benchmark
# ci job
# source: https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options
[lib]
bench = false

[features]
default = ["async"]
async-callbacks = []
async = ["async-callbacks"]
================================================
FILE: engineio/README.md
================================================
[](https://crates.io/crates/rust_engineio)
[](https://docs.rs/rust_engineio)
# Rust-engineio-client
An implementation of an engine.io client written in the rust programming language. This implementation currently supports revision 4 of the engine.io protocol. If you have any connection issues with this client, make sure the server uses at least revision 4 of the engine.io protocol.
## Example usage
``` rust
use rust_engineio::{ClientBuilder, Client, packet::{Packet, PacketId}};
use url::Url;
use bytes::Bytes;
// get a client with an `on_open` callback
let client: Client = ClientBuilder::new(Url::parse("http://localhost:4201").unwrap())
.on_open(|_| println!("Connection opened!"))
.build()
.expect("Connection failed");
// connect to the server
client.connect().expect("Connection failed");
// create a packet, in this case a message packet and emit it
let packet = Packet::new(PacketId::Message, Bytes::from_static(b"Hello World"));
client.emit(packet).expect("Server unreachable");
// disconnect from the server
client.disconnect().expect("Disconnect failed")
```
The main entry point for using this crate is the `ClientBuilder` (or `asynchronous::ClientBuilder` respectively)
which provides the opportunity to define how you want to connect to a certain endpoint.
The following connection methods are available:
* `build`: Build websocket if allowed, if not fall back to polling. Standard configuration.
* `build_polling`: enforces a `polling` transport.
* `build_websocket_with_upgrade`: Build socket with a polling transport then upgrade to websocket transport (if possible).
* `build_websocket`: Build socket with only a websocket transport, crashes when websockets are not allowed.
## Current features
This implementation now supports all of the features of the engine.io protocol mentioned [here](https://github.com/socketio/engine.io-protocol).
This includes various transport options, the possibility of sending engine.io packets and registering the
common engine.io event callbacks:
* on_open
* on_close
* on_data
* on_error
* on_packet
It is also possible to pass in custom tls configurations via the `TlsConnector` as well
as custom headers for the opening request.
## Documentation
Documentation of this crate can be found up on [docs.rs](https://docs.rs/rust_engineio).
## Async version
The crate also ships with an asynchronous version that can be enabled with a feature flag.
The async version implements the same features mentioned above.
The asynchronous version has a similar API, just with async functions. Currently the futures
can only be executed with [`tokio`](https://tokio.rs). In the first benchmarks the async version
showed improvements of up to 93% in speed.
To make use of the async version, import the crate as follows:
```toml
[dependencies]
rust_engineio = { version = "0.3.1", features = ["async"] }
```
================================================
FILE: engineio/benches/engineio.rs
================================================
use criterion::{criterion_group, criterion_main};
use native_tls::Certificate;
use native_tls::TlsConnector;
use rust_engineio::error::Error;
use std::fs::File;
use std::io::Read;
use url::Url;
pub use criterion_wrappers::*;
pub use tests::*;
pub use util::*;
pub mod util {
use super::*;
pub fn engine_io_url() -> Result {
let url = std::env::var("ENGINE_IO_SERVER")
.unwrap_or_else(|_| "http://localhost:4201".to_owned());
Ok(Url::parse(&url)?)
}
pub fn engine_io_url_secure() -> Result {
let url = std::env::var("ENGINE_IO_SECURE_SERVER")
.unwrap_or_else(|_| "https://localhost:4202".to_owned());
Ok(Url::parse(&url)?)
}
pub fn tls_connector() -> Result {
let cert_path = "../".to_owned()
+ &std::env::var("CA_CERT_PATH").unwrap_or_else(|_| "ci/cert/ca.crt".to_owned());
let mut cert_file = File::open(cert_path)?;
let mut buf = vec![];
cert_file.read_to_end(&mut buf)?;
let cert: Certificate = Certificate::from_pem(&buf[..]).unwrap();
Ok(TlsConnector::builder()
// ONLY USE FOR TESTING!
.danger_accept_invalid_hostnames(true)
.add_root_certificate(cert)
.build()
.unwrap())
}
}
/// sync benches
#[cfg(not(feature = "async"))]
pub mod tests {
use bytes::Bytes;
use reqwest::Url;
use rust_engineio::{Client, ClientBuilder, Error, Packet, PacketId};
use crate::tls_connector;
pub fn engine_io_socket_build(url: Url) -> Result {
ClientBuilder::new(url).build()
}
pub fn engine_io_socket_build_polling(url: Url) -> Result {
ClientBuilder::new(url).build_polling()
}
pub fn engine_io_socket_build_polling_secure(url: Url) -> Result {
ClientBuilder::new(url)
.tls_config(tls_connector()?)
.build_polling()
}
pub fn engine_io_socket_build_websocket(url: Url) -> Result {
ClientBuilder::new(url).build_websocket()
}
pub fn engine_io_socket_build_websocket_secure(url: Url) -> Result {
ClientBuilder::new(url)
.tls_config(tls_connector()?)
.build_websocket()
}
pub fn engine_io_packet() -> Packet {
Packet::new(PacketId::Message, Bytes::from("hello world"))
}
pub fn engine_io_emit(socket: &Client, packet: Packet) -> Result<(), Error> {
socket.emit(packet)
}
}
#[cfg(not(feature = "async"))]
mod criterion_wrappers {
    use criterion::{black_box, Criterion};

    use super::*;

    /// Benchmarks building (and immediately closing) a client with the
    /// default transport selection.
    pub fn criterion_engine_io_socket_build(c: &mut Criterion) {
        let url = engine_io_url().unwrap();
        c.bench_function("engine io build", |b| {
            b.iter(|| {
                engine_io_socket_build(black_box(url.clone()))
                    .unwrap()
                    .close()
            })
        });
    }

    /// Benchmarks building a polling-only client.
    pub fn criterion_engine_io_socket_build_polling(c: &mut Criterion) {
        let url = engine_io_url().unwrap();
        c.bench_function("engine io build polling", |b| {
            b.iter(|| {
                engine_io_socket_build_polling(black_box(url.clone()))
                    .unwrap()
                    .close()
            })
        });
    }

    /// Benchmarks building a polling-only client over TLS.
    pub fn criterion_engine_io_socket_build_polling_secure(c: &mut Criterion) {
        let url = engine_io_url_secure().unwrap();
        c.bench_function("engine io build polling secure", |b| {
            b.iter(|| {
                engine_io_socket_build_polling_secure(black_box(url.clone()))
                    .unwrap()
                    .close()
            })
        });
    }

    /// Benchmarks building a websocket-only client.
    pub fn criterion_engine_io_socket_build_websocket(c: &mut Criterion) {
        let url = engine_io_url().unwrap();
        c.bench_function("engine io build websocket", |b| {
            b.iter(|| {
                engine_io_socket_build_websocket(black_box(url.clone()))
                    .unwrap()
                    .close()
            })
        });
    }

    /// Benchmarks building a websocket-only client over TLS.
    pub fn criterion_engine_io_socket_build_websocket_secure(c: &mut Criterion) {
        let url = engine_io_url_secure().unwrap();
        c.bench_function("engine io build websocket secure", |b| {
            b.iter(|| {
                engine_io_socket_build_websocket_secure(black_box(url.clone()))
                    .unwrap()
                    .close()
            })
        });
    }

    /// Benchmarks constructing a single message packet.
    pub fn criterion_engine_io_packet(c: &mut Criterion) {
        c.bench_function("engine io packet", |b| b.iter(|| engine_io_packet()));
    }

    /// Benchmarks emitting packets over a connected long-polling client.
    pub fn criterion_engine_io_emit_polling(c: &mut Criterion) {
        let url = engine_io_url().unwrap();
        let socket = engine_io_socket_build_polling(url).unwrap();
        socket.connect().unwrap();
        let packet = engine_io_packet();
        c.bench_function("engine io polling emit", |b| {
            b.iter(|| engine_io_emit(black_box(&socket), black_box(packet.clone())).unwrap())
        });
        socket.close().unwrap();
    }

    /// Benchmarks emitting packets over a connected TLS long-polling client.
    pub fn criterion_engine_io_emit_polling_secure(c: &mut Criterion) {
        let url = engine_io_url_secure().unwrap();
        let socket = engine_io_socket_build_polling_secure(url).unwrap();
        socket.connect().unwrap();
        let packet = engine_io_packet();
        c.bench_function("engine io polling secure emit", |b| {
            b.iter(|| engine_io_emit(black_box(&socket), black_box(packet.clone())).unwrap())
        });
        socket.close().unwrap();
    }

    /// Benchmarks emitting packets over a connected websocket client.
    pub fn criterion_engine_io_emit_websocket(c: &mut Criterion) {
        let url = engine_io_url().unwrap();
        let socket = engine_io_socket_build_websocket(url).unwrap();
        socket.connect().unwrap();
        let packet = engine_io_packet();
        c.bench_function("engine io websocket emit", |b| {
            b.iter(|| engine_io_emit(black_box(&socket), black_box(packet.clone())).unwrap())
        });
        socket.close().unwrap();
    }

    /// Benchmarks emitting packets over a connected TLS websocket client.
    pub fn criterion_engine_io_emit_websocket_secure(c: &mut Criterion) {
        let url = engine_io_url_secure().unwrap();
        let socket = engine_io_socket_build_websocket_secure(url).unwrap();
        socket.connect().unwrap();
        let packet = engine_io_packet();
        c.bench_function("engine io websocket secure emit", |b| {
            b.iter(|| engine_io_emit(black_box(&socket), black_box(packet.clone())).unwrap())
        });
        socket.close().unwrap();
    }
}
/// async benches
#[cfg(feature = "async")]
pub mod tests {
    use bytes::Bytes;
    use rust_engineio::{
        asynchronous::{Client, ClientBuilder},
        Error, Packet, PacketId,
    };
    use url::Url;

    use crate::tls_connector;

    /// Builds an async client with the default transport selection
    /// (websocket with polling fallback).
    pub async fn engine_io_socket_build(url: Url) -> Result<Client, Error> {
        ClientBuilder::new(url).build().await
    }

    /// Builds an async client that is forced onto the polling transport.
    pub async fn engine_io_socket_build_polling(url: Url) -> Result<Client, Error> {
        ClientBuilder::new(url).build_polling().await
    }

    /// Builds an async polling client that trusts the self-signed test CA.
    pub async fn engine_io_socket_build_polling_secure(url: Url) -> Result<Client, Error> {
        ClientBuilder::new(url)
            .tls_config(tls_connector()?)
            .build_polling()
            .await
    }

    /// Builds an async client that is forced onto the websocket transport.
    pub async fn engine_io_socket_build_websocket(url: Url) -> Result<Client, Error> {
        ClientBuilder::new(url).build_websocket().await
    }

    /// Builds an async websocket client that trusts the self-signed test CA.
    pub async fn engine_io_socket_build_websocket_secure(url: Url) -> Result<Client, Error> {
        ClientBuilder::new(url)
            .tls_config(tls_connector()?)
            .build_websocket()
            .await
    }

    /// Creates the message packet that the emit benchmarks send.
    pub fn engine_io_packet() -> Packet {
        Packet::new(PacketId::Message, Bytes::from("hello world"))
    }

    /// Emits a single packet on an already connected client.
    pub async fn engine_io_emit(socket: &Client, packet: Packet) -> Result<(), Error> {
        socket.emit(packet).await
    }
}
#[cfg(feature = "async")]
mod criterion_wrappers {
use std::sync::Arc;
use bytes::Bytes;
use criterion::{black_box, Criterion};
use lazy_static::lazy_static;
use rust_engineio::{Packet, PacketId};
use tokio::runtime::{Builder, Runtime};
use super::tests::{
engine_io_emit, engine_io_packet, engine_io_socket_build, engine_io_socket_build_polling,
engine_io_socket_build_polling_secure, engine_io_socket_build_websocket,
engine_io_socket_build_websocket_secure,
};
use super::util::{engine_io_url, engine_io_url_secure};
lazy_static! {
    /// Shared multi-threaded tokio runtime that drives all async benches;
    /// created once and reused so runtime startup cost is not measured.
    static ref RUNTIME: Arc<Runtime> =
        Arc::new(Builder::new_multi_thread().enable_all().build().unwrap());
}
/// Benchmarks building (and immediately closing) an async client with the
/// default transport selection, driven on the shared runtime.
pub fn criterion_engine_io_socket_build(c: &mut Criterion) {
    let url = engine_io_url().unwrap();
    c.bench_function("engine io build", move |b| {
        b.to_async(RUNTIME.as_ref()).iter(|| async {
            engine_io_socket_build(black_box(url.clone()))
                .await
                .unwrap()
                .close()
                .await
        })
    });
}
pub fn criterion_engine_io_socket_build_polling(c: &mut Criterion) {
let url = engine_io_url().unwrap();
c.bench_function("engine io build polling", move |b| {
b.to_async(RUNTIME.as_ref()).iter(|| async {
engine_io_socket_build_polling(black_box(url.clone()))
.await
.unwrap()
.close()
.await
})
});
}
pub fn criterion_engine_io_socket_build_polling_secure(c: &mut Criterion) {
let url = engine_io_url_secure().unwrap();
c.bench_function("engine io build polling secure", move |b| {
b.to_async(RUNTIME.as_ref()).iter(|| async {
engine_io_socket_build_polling_secure(black_box(url.clone()))
.await
.unwrap()
.close()
.await
})
});
}
pub fn criterion_engine_io_socket_build_websocket(c: &mut Criterion) {
let url = engine_io_url().unwrap();
c.bench_function("engine io build websocket", move |b| {
b.to_async(RUNTIME.as_ref()).iter(|| async {
engine_io_socket_build_websocket(black_box(url.clone()))
.await
.unwrap()
.close()
.await
})
});
}
pub fn criterion_engine_io_socket_build_websocket_secure(c: &mut Criterion) {
let url = engine_io_url_secure().unwrap();
c.bench_function("engine io build websocket secure", move |b| {
b.to_async(RUNTIME.as_ref()).iter(|| async {
engine_io_socket_build_websocket_secure(black_box(url.clone()))
.await
.unwrap()
.close()
.await
})
});
}
pub fn criterion_engine_io_packet(c: &mut Criterion) {
c.bench_function("engine io packet", move |b| {
b.iter(|| Packet::new(PacketId::Message, Bytes::from("hello world")))
});
}
pub fn criterion_engine_io_emit_polling(c: &mut Criterion) {
let url = engine_io_url().unwrap();
let socket = RUNTIME.block_on(async {
let socket = engine_io_socket_build_polling(url).await.unwrap();
socket.connect().await.unwrap();
socket
});
let packet = engine_io_packet();
c.bench_function("engine io polling emit", |b| {
b.to_async(RUNTIME.as_ref()).iter(|| async {
engine_io_emit(black_box(&socket), black_box(packet.clone()))
.await
.unwrap()
})
});
}
pub fn criterion_engine_io_emit_polling_secure(c: &mut Criterion) {
let url = engine_io_url_secure().unwrap();
let socket = RUNTIME.block_on(async {
let socket = engine_io_socket_build_polling_secure(url).await.unwrap();
socket.connect().await.unwrap();
socket
});
let packet = engine_io_packet();
c.bench_function("engine io polling secure emit", |b| {
b.to_async(RUNTIME.as_ref()).iter(|| async {
engine_io_emit(black_box(&socket), black_box(packet.clone()))
.await
.unwrap()
})
});
}
pub fn criterion_engine_io_emit_websocket(c: &mut Criterion) {
let url = engine_io_url().unwrap();
let socket = RUNTIME.block_on(async {
let socket = engine_io_socket_build_websocket(url).await.unwrap();
socket.connect().await.unwrap();
socket
});
let packet = engine_io_packet();
c.bench_function("engine io websocket emit", |b| {
b.to_async(RUNTIME.as_ref()).iter(|| async {
engine_io_emit(black_box(&socket), black_box(packet.clone()))
.await
.unwrap()
})
});
}
pub fn criterion_engine_io_emit_websocket_secure(c: &mut Criterion) {
let url = engine_io_url_secure().unwrap();
let socket = RUNTIME.block_on(async {
let socket = engine_io_socket_build_websocket_secure(url).await.unwrap();
socket.connect().await.unwrap();
socket
});
let packet = engine_io_packet();
c.bench_function("engine io websocket secure emit", |b| {
b.to_async(RUNTIME.as_ref()).iter(|| async {
engine_io_emit(black_box(&socket), black_box(packet.clone()))
.await
.unwrap()
})
});
}
}
// Register every benchmark wrapper in one group and generate the benchmark
// harness entry point (`main`).
criterion_group!(
    benches,
    criterion_engine_io_socket_build_polling,
    criterion_engine_io_socket_build_polling_secure,
    criterion_engine_io_socket_build_websocket,
    criterion_engine_io_socket_build_websocket_secure,
    criterion_engine_io_socket_build,
    criterion_engine_io_packet,
    criterion_engine_io_emit_polling,
    criterion_engine_io_emit_polling_secure,
    criterion_engine_io_emit_websocket,
    criterion_engine_io_emit_websocket_secure
);
criterion_main!(benches);
================================================
FILE: engineio/src/asynchronous/async_socket.rs
================================================
use std::{
fmt::Debug,
pin::Pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use async_stream::try_stream;
use bytes::Bytes;
use futures_util::{stream, Stream, StreamExt};
use tokio::{runtime::Handle, sync::Mutex, time::Instant};
use crate::{
asynchronous::{callback::OptionalCallback, transport::AsyncTransportType},
error::Result,
packet::{HandshakePacket, Payload},
Error, Packet, PacketId,
};
#[derive(Clone)]
pub struct Socket {
handle: Handle,
transport: Arc>,
transport_raw: AsyncTransportType,
on_close: OptionalCallback<()>,
on_data: OptionalCallback,
on_error: OptionalCallback,
on_open: OptionalCallback<()>,
on_packet: OptionalCallback,
connected: Arc,
last_ping: Arc>,
last_pong: Arc>,
connection_data: Arc,
max_ping_timeout: u64,
}
impl Socket {
pub(crate) fn new(
transport: AsyncTransportType,
handshake: HandshakePacket,
on_close: OptionalCallback<()>,
on_data: OptionalCallback,
on_error: OptionalCallback,
on_open: OptionalCallback<()>,
on_packet: OptionalCallback,
) -> Self {
let max_ping_timeout = handshake.ping_interval + handshake.ping_timeout;
Socket {
handle: Handle::current(),
on_close,
on_data,
on_error,
on_open,
on_packet,
transport: Arc::new(Mutex::new(transport.clone())),
transport_raw: transport,
connected: Arc::new(AtomicBool::default()),
last_ping: Arc::new(Mutex::new(Instant::now())),
last_pong: Arc::new(Mutex::new(Instant::now())),
connection_data: Arc::new(handshake),
max_ping_timeout,
}
}
/// Opens the connection to a specified server. The first Pong packet is sent
/// to the server to trigger the Ping-cycle.
pub async fn connect(&self) -> Result<()> {
// SAFETY: Has valid handshake due to type
self.connected.store(true, Ordering::Release);
if let Some(on_open) = self.on_open.as_ref() {
let on_open = on_open.clone();
self.handle.spawn(async move { on_open(()).await });
}
// set the last ping to now and set the connected state
*self.last_ping.lock().await = Instant::now();
// emit a pong packet to keep trigger the ping cycle on the server
self.emit(Packet::new(PacketId::Pong, Bytes::new())).await?;
Ok(())
}
/// A helper method that distributes
pub(super) async fn handle_incoming_packet(&self, packet: Packet) -> Result<()> {
// check for the appropriate action or callback
self.handle_packet(packet.clone());
match packet.packet_id {
PacketId::MessageBinary => {
self.handle_data(packet.data.clone());
}
PacketId::Message => {
self.handle_data(packet.data.clone());
}
PacketId::Close => {
self.handle_close();
}
PacketId::Upgrade => {
// this is already checked during the handshake, so just do nothing here
}
PacketId::Ping => {
self.pinged().await;
self.emit(Packet::new(PacketId::Pong, Bytes::new())).await?;
}
PacketId::Pong | PacketId::Open => {
// this will never happen as the pong and open
// packets are only sent by the client
return Err(Error::InvalidPacket());
}
PacketId::Noop => (),
}
Ok(())
}
/// Helper method that parses bytes and returns an iterator over the elements.
fn parse_payload(bytes: Bytes) -> impl Stream- > {
try_stream! {
let payload = Payload::try_from(bytes);
for elem in payload?.into_iter() {
yield elem;
}
}
}
/// Creates a stream over the incoming packets, uses the streams provided by the
/// underlying transport types.
fn stream(
mut transport: AsyncTransportType,
) -> Pin> + 'static + Send>> {
// map the byte stream of the underlying transport
// to a packet stream
Box::pin(try_stream! {
for await payload in transport.as_pin_box() {
for await packet in Self::parse_payload(payload?) {
yield packet?;
}
}
})
}
pub async fn disconnect(&self) -> Result<()> {
if let Some(on_close) = self.on_close.as_ref() {
let on_close = on_close.clone();
self.handle.spawn(async move { on_close(()).await });
}
self.emit(Packet::new(PacketId::Close, Bytes::new()))
.await?;
self.connected.store(false, Ordering::Release);
Ok(())
}
/// Sends a packet to the server.
pub async fn emit(&self, packet: Packet) -> Result<()> {
if !self.connected.load(Ordering::Acquire) {
let error = Error::IllegalActionBeforeOpen();
self.call_error_callback(format!("{}", error));
return Err(error);
}
let is_binary = packet.packet_id == PacketId::MessageBinary;
// send a post request with the encoded payload as body
// if this is a binary attachment, then send the raw bytes
let data: Bytes = if is_binary {
packet.data
} else {
packet.into()
};
let lock = self.transport.lock().await;
let fut = lock.as_transport().emit(data, is_binary);
if let Err(error) = fut.await {
self.call_error_callback(error.to_string());
return Err(error);
}
Ok(())
}
/// Calls the error callback with a given message.
#[inline]
fn call_error_callback(&self, text: String) {
if let Some(on_error) = self.on_error.as_ref() {
let on_error = on_error.clone();
self.handle.spawn(async move { on_error(text).await });
}
}
// Check if the underlying transport client is connected.
pub(crate) fn is_connected(&self) -> bool {
self.connected.load(Ordering::Acquire)
}
pub(crate) async fn pinged(&self) {
*self.last_ping.lock().await = Instant::now();
}
/// Returns the time in milliseconds that is left until a new ping must be received.
/// This is used to detect whether we have been disconnected from the server.
/// See https://socket.io/docs/v4/how-it-works/#disconnection-detection
async fn time_to_next_ping(&self) -> u64 {
match Instant::now().checked_duration_since(*self.last_ping.lock().await) {
Some(since_last_ping) => {
let since_last_ping = since_last_ping.as_millis() as u64;
if since_last_ping > self.max_ping_timeout {
0
} else {
self.max_ping_timeout - since_last_ping
}
}
None => 0,
}
}
pub(crate) fn handle_packet(&self, packet: Packet) {
if let Some(on_packet) = self.on_packet.as_ref() {
let on_packet = on_packet.clone();
self.handle.spawn(async move { on_packet(packet).await });
}
}
pub(crate) fn handle_data(&self, data: Bytes) {
if let Some(on_data) = self.on_data.as_ref() {
let on_data = on_data.clone();
self.handle.spawn(async move { on_data(data).await });
}
}
pub(crate) fn handle_close(&self) {
if let Some(on_close) = self.on_close.as_ref() {
let on_close = on_close.clone();
self.handle.spawn(async move { on_close(()).await });
}
self.connected.store(false, Ordering::Release);
}
/// Returns the packet stream for the client.
pub(crate) fn as_stream<'a>(
&'a self,
) -> Pin> + Send + 'a>> {
stream::unfold(
Self::stream(self.transport_raw.clone()),
|mut stream| async {
// Wait for the next payload or until we should have received the next ping.
match tokio::time::timeout(
std::time::Duration::from_millis(self.time_to_next_ping().await),
stream.next(),
)
.await
{
Ok(result) => result.map(|result| (result, stream)),
// We didn't receive a ping in time and now consider the connection as closed.
Err(_) => {
// Be nice and disconnect properly.
if let Err(e) = self.disconnect().await {
Some((Err(e), stream))
} else {
Some((Err(Error::PingTimeout()), stream))
}
}
}
},
)
.boxed()
}
}
#[cfg_attr(tarpaulin, ignore)]
impl Debug for Socket {
    /// Debug representation listing the transport, callbacks and the
    /// liveness/handshake state (handle and raw transport are omitted).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("Socket");
        dbg.field("transport", &self.transport);
        dbg.field("on_close", &self.on_close);
        dbg.field("on_data", &self.on_data);
        dbg.field("on_error", &self.on_error);
        dbg.field("on_open", &self.on_open);
        dbg.field("on_packet", &self.on_packet);
        dbg.field("connected", &self.connected);
        dbg.field("last_ping", &self.last_ping);
        dbg.field("last_pong", &self.last_pong);
        dbg.field("connection_data", &self.connection_data);
        dbg.finish()
    }
}
================================================
FILE: engineio/src/asynchronous/async_transports/mod.rs
================================================
// Transport implementations used by the asynchronous engine.io client.
mod polling;
mod websocket;
mod websocket_general;
mod websocket_secure;
// Re-export the concrete transports; `websocket_general` stays private as it
// only holds shared websocket plumbing.
pub use self::polling::PollingTransport;
pub use self::websocket::WebsocketTransport;
pub use self::websocket_secure::WebsocketSecureTransport;
================================================
FILE: engineio/src/asynchronous/async_transports/polling.rs
================================================
use adler32::adler32;
use async_stream::try_stream;
use async_trait::async_trait;
use base64::{engine::general_purpose, Engine as _};
use bytes::{BufMut, Bytes, BytesMut};
use futures_util::{Stream, StreamExt};
use http::HeaderMap;
use native_tls::TlsConnector;
use reqwest::{Client, ClientBuilder, Response};
use std::fmt::Debug;
use std::time::SystemTime;
use std::{pin::Pin, sync::Arc};
use tokio::sync::RwLock;
use url::Url;
use crate::asynchronous::generator::StreamGenerator;
use crate::{asynchronous::transport::AsyncTransport, error::Result, Error};
/// An asynchronous polling type. Makes use of the nonblocking reqwest types and
/// methods.
#[derive(Clone)]
pub struct PollingTransport {
client: Client,
base_url: Arc>,
generator: StreamGenerator,
}
impl PollingTransport {
pub fn new(
base_url: Url,
tls_config: Option,
opening_headers: Option,
) -> Self {
let client = match (tls_config, opening_headers) {
(Some(config), Some(map)) => ClientBuilder::new()
.use_preconfigured_tls(config)
.default_headers(map)
.build()
.unwrap(),
(Some(config), None) => ClientBuilder::new()
.use_preconfigured_tls(config)
.build()
.unwrap(),
(None, Some(map)) => ClientBuilder::new().default_headers(map).build().unwrap(),
(None, None) => Client::new(),
};
let mut url = base_url;
url.query_pairs_mut().append_pair("transport", "polling");
PollingTransport {
client: client.clone(),
base_url: Arc::new(RwLock::new(url.clone())),
generator: StreamGenerator::new(Self::stream(url, client)),
}
}
fn address(mut url: Url) -> Result {
let reader = format!("{:#?}", SystemTime::now());
let hash = adler32(reader.as_bytes()).unwrap();
url.query_pairs_mut().append_pair("t", &hash.to_string());
Ok(url)
}
fn send_request(url: Url, client: Client) -> impl Stream
- > {
try_stream! {
let address = Self::address(url);
yield client
.get(address?)
.send().await?
}
}
fn stream(
url: Url,
client: Client,
) -> Pin> + 'static + Send>> {
Box::pin(try_stream! {
loop {
for await elem in Self::send_request(url.clone(), client.clone()) {
for await bytes in elem?.bytes_stream() {
yield bytes?;
}
}
}
})
}
}
impl Stream for PollingTransport {
type Item = Result;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll